Commit
Feature/com building (#9)
* change renewable directory

* incorporate com building load

* add com load electrification

* format code
boyuan276 authored Jun 28, 2024
1 parent 07c2d09 commit fabb2d2
Showing 9 changed files with 1,034 additions and 678 deletions.
19 changes: 11 additions & 8 deletions examples/ex_mopf_w_esr_vre.ipynb
@@ -78,14 +78,8 @@
"results_dir = os.path.join(os.path.dirname(data_dir), 'results')\n",
"print('Results directory: {}'.format(results_dir))\n",
"\n",
"solar_data_dir = os.path.join(data_dir, 'solar')\n",
"print('Solar data directory: {}'.format(solar_data_dir))\n",
"\n",
"onshore_wind_data_dir = os.path.join(data_dir, 'onshore_wind')\n",
"print('Onshore wind data directory: {}'.format(onshore_wind_data_dir))\n",
"\n",
"offshore_wind_data_dir = os.path.join(data_dir, 'offshore_wind')\n",
"print('Offshore wind data directory: {}'.format(offshore_wind_data_dir))"
"renewable_data_dir = os.path.join(data_dir, 'renewable')\n",
"print('Renewable data directory: {}'.format(renewable_data_dir))\n"
]
},
{
@@ -199,6 +193,15 @@
"metadata": {},
"outputs": [],
"source": [
"solar_data_dir = os.path.join(data_dir, 'solar')\n",
"print('Solar data directory: {}'.format(solar_data_dir))\n",
"\n",
"onshore_wind_data_dir = os.path.join(data_dir, 'onshore_wind')\n",
"print('Onshore wind data directory: {}'.format(onshore_wind_data_dir))\n",
"\n",
"offshore_wind_data_dir = os.path.join(data_dir, 'offshore_wind')\n",
"print('Offshore wind data directory: {}'.format(offshore_wind_data_dir))\n",
"\n",
"vre_prop, genmax_profile_vre = read_vre_data(\n",
" solar_data_dir, onshore_wind_data_dir, offshore_wind_data_dir)"
]
1,407 changes: 832 additions & 575 deletions examples/ex_mopf_w_esr_vre_elec.ipynb

Large diffs are not rendered by default.

19 changes: 11 additions & 8 deletions nygrid/allocate.py
@@ -64,7 +64,7 @@ def nearest_neighbor_lat_lon(left_gdf: gpd.GeoDataFrame,
leaf_size: int = 20) -> Union[Dict, Tuple]:
"""
For each point in left_gdf, find the closest point in right GeoDataFrame and return them.
NOTICE: Assumes that the input Points are in WGS84 projection (lat/lon).
Parameters
@@ -110,7 +110,7 @@ def nearest_neighbor_lat_lon(left_gdf: gpd.GeoDataFrame,
# Ensure that the index corresponds to the one in left_gdf
closest_points = closest_points.reset_index(drop=True)

# Add distance if requested
if return_dist:
# Convert to meters from radians
earth_radius = 6371000 # meters
@@ -125,7 +125,7 @@ def nearest_neighbor_meters(left_gdf: gpd.GeoDataFrame,
leaf_size: int = 20) -> Union[Dict, Tuple]:
"""
For each point in left_gdf, find the closest point in right GeoDataFrame and return them.
NOTICE: Assumes that the input Points are in a projected CRS (meters).
Parameters
@@ -151,24 +151,27 @@
# Ensure that index in right gdf is formed of sequential numbers
right = right_gdf.copy().reset_index(drop=True)

# Parse coordinates from points and insert them into a numpy array as METERS
left_meters = np.array(left_gdf[left_geom_col].apply(lambda geom: (geom.x, geom.y)).to_list())
right_meters = np.array(right[right_geom_col].apply(lambda geom: (geom.x, geom.y)).to_list())
# Parse coordinates from points and insert them into a numpy array as METERS
left_meters = np.array(left_gdf[left_geom_col].apply(
lambda geom: (geom.x, geom.y)).to_list())
right_meters = np.array(right[right_geom_col].apply(
lambda geom: (geom.x, geom.y)).to_list())

# Find the nearest points
# -----------------------
# closest ==> index in right_gdf that corresponds to the closest point
# dist ==> distance between the nearest neighbors (in meters)

closest, dist = get_nearest(src_points=left_meters, candidates=right_meters, leaf_size=leaf_size)
closest, dist = get_nearest(
src_points=left_meters, candidates=right_meters, leaf_size=leaf_size)

# Return points from right GeoDataFrame that are closest to points in left GeoDataFrame
closest_points = right.loc[closest]

# Ensure that the index corresponds to the one in left_gdf
closest_points = closest_points.reset_index(drop=True)

# Add distance if requested
if return_dist:
# Convert to meters from radians
# earth_radius = 6371000 # meters
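`get_nearest` is called in both functions but sits outside this diff. Below is a minimal sketch of such a helper, assuming scikit-learn's `BallTree`; only the name, the `(src_points, candidates, leaf_size)` arguments, and the `(closest, dist)` return shape come from the calls above, the rest is an assumption. The lat/lon variant would presumably build the tree on coordinates in radians with `metric='haversine'`, which is why the caller multiplies `dist` by the Earth's radius.

```python
import numpy as np
from sklearn.neighbors import BallTree


def get_nearest(src_points: np.ndarray,
                candidates: np.ndarray,
                leaf_size: int = 20,
                metric: str = 'euclidean'):
    """For each row of src_points, find the nearest row of candidates.

    Returns (closest, dist): the index into candidates and the distance
    to it, one entry per source point. Hypothetical sketch, not the
    repository's actual implementation.
    """
    tree = BallTree(candidates, leaf_size=leaf_size, metric=metric)
    dist, idx = tree.query(src_points, k=1)  # k=1: single nearest neighbor
    return idx.ravel(), dist.ravel()
```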
21 changes: 14 additions & 7 deletions nygrid/nygrid.py
@@ -694,7 +694,8 @@ def set_gen_mw_sch(self, gen_mw_sch: pd.DataFrame) -> None:
"""

# Slice the generation profile to the simulation period
gen_mw_sch = gen_mw_sch[self.start_datetime:self.end_datetime].to_numpy()
gen_mw_sch = gen_mw_sch[self.start_datetime:
self.end_datetime].to_numpy()

# Generator schedule in p.u.
if gen_mw_sch is not None and gen_mw_sch.size > 0:
@@ -724,7 +725,8 @@ def set_gen_max_sch(self, gen_max_sch: pd.DataFrame) -> None:
"""

# Slice the generator profile to the simulation period
gen_max_sch = gen_max_sch[self.start_datetime:self.end_datetime].to_numpy()
gen_max_sch = gen_max_sch[self.start_datetime:
self.end_datetime].to_numpy()

# Generator upper operating limit in p.u.
if gen_max_sch is not None and gen_max_sch.size > 0:
@@ -749,7 +751,8 @@ def set_vre_max_sch(self, vre_max_sch: pd.DataFrame) -> None:
"""

# Slice the generator profile to the simulation period
vre_max_sch = vre_max_sch[self.start_datetime:self.end_datetime].to_numpy()
vre_max_sch = vre_max_sch[self.start_datetime:
self.end_datetime].to_numpy()

# Generator upper operating limit in p.u.
if vre_max_sch is not None and vre_max_sch.size > 0:
@@ -774,7 +777,8 @@ def set_gen_min_sch(self, gen_min_sch: pd.DataFrame) -> None:
"""

# Slice the generator profile to the simulation period
gen_min_sch = gen_min_sch[self.start_datetime:self.end_datetime].to_numpy()
gen_min_sch = gen_min_sch[self.start_datetime:
self.end_datetime].to_numpy()

# Generator lower operating limit in p.u.
if gen_min_sch is not None and gen_min_sch.size > 0:
@@ -804,7 +808,8 @@ def set_gen_ramp_sch(self, gen_ramp_sch: pd.DataFrame,
# Convert 30min ramp rate to hourly ramp rate
if interval == '30min':
gen_ramp_sch = gen_ramp_sch * 2
gen_ramp_sch = gen_ramp_sch[self.start_datetime:self.end_datetime].to_numpy()
gen_ramp_sch = gen_ramp_sch[self.start_datetime:
self.end_datetime].to_numpy()

# Convert default value 0 (Unlimited) to 1e6
gen_ramp_sch[gen_ramp_sch == 0] = 1e6
@@ -841,8 +846,10 @@ def set_gen_cost_sch(self, gen_cost0_sch: pd.DataFrame,
"""

# Slice the generator profile to the simulation period
gen_cost0_sch = gen_cost0_sch[self.start_datetime:self.end_datetime].to_numpy()
gen_cost1_sch = gen_cost1_sch[self.start_datetime:self.end_datetime].to_numpy()
gen_cost0_sch = gen_cost0_sch[self.start_datetime:
self.end_datetime].to_numpy()
gen_cost1_sch = gen_cost1_sch[self.start_datetime:
self.end_datetime].to_numpy()

# Linear cost intercept coefficients in p.u.
if gen_cost0_sch is not None and gen_cost0_sch.size > 0:
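All of these setters lean on the same pandas idiom: slicing a `DatetimeIndex` by label keeps both endpoints. A small self-contained illustration of the pattern (names and values here are hypothetical):

```python
import numpy as np
import pandas as pd

# Hypothetical hourly generator schedule indexed by timestamp.
idx = pd.date_range('2018-01-01', periods=48, freq='h')
gen_max_sch = pd.DataFrame(np.random.rand(48, 3), index=idx)

start_datetime = pd.Timestamp('2018-01-01 06:00')
end_datetime = pd.Timestamp('2018-01-01 18:00')

# Label-based slicing on a DatetimeIndex is inclusive of BOTH endpoints,
# so the window below has 13 hourly rows, converted to a plain ndarray.
window = gen_max_sch[start_datetime:end_datetime].to_numpy()
assert window.shape == (13, 3)
```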
6 changes: 4 additions & 2 deletions nygrid/optimizer.py
@@ -179,7 +179,8 @@ def gen_cost_ene_expr(model):
# ESR energy cost
def esr_cost_ene_expr(model):
return sum(self.nygrid.esrcost_crg[t, esr] * model.esrPCrg[t, esr]
+ self.nygrid.esrcost_dis[t, esr] * model.esrPDis[t, esr]
+ self.nygrid.esrcost_dis[t,
esr] * model.esrPDis[t, esr]
for esr in self.esrs for t in self.times)

def over_gen_penalty_expr(model):
@@ -439,7 +440,8 @@ def esr_soc_update_rule(model, t, esr):
if self.nygrid.esr_init is not None:
return model.esrSOC[t, esr] == self.nygrid.esr_init[esr] \
+ model.esrPCrg[t, esr] * self.nygrid.esr_crg_eff[t, esr] \
- model.esrPDis[t, esr] / self.nygrid.esr_dis_eff[t, esr]
- model.esrPDis[t, esr] / \
self.nygrid.esr_dis_eff[t, esr]
else:
return pyo.Constraint.Skip
else:
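The rewrapped constraint above is a standard state-of-charge balance: charging adds energy scaled by the charge efficiency, while discharging drains the store at one over the discharge efficiency. A generic, self-contained Pyomo sketch of the same recursion (single storage unit, constant efficiencies, all values invented; the repository's rule additionally special-cases the first step via `esr_init` and indexes efficiencies by time and unit):

```python
import pyomo.environ as pyo

model = pyo.ConcreteModel()
model.T = pyo.RangeSet(0, 23)  # hypothetical 24-step horizon
model.esrPCrg = pyo.Var(model.T, within=pyo.NonNegativeReals)  # charge power
model.esrPDis = pyo.Var(model.T, within=pyo.NonNegativeReals)  # discharge power
model.esrSOC = pyo.Var(model.T, within=pyo.NonNegativeReals)   # state of charge

soc_init, crg_eff, dis_eff = 10.0, 0.95, 0.95  # assumed constants


def esr_soc_update_rule(m, t):
    prev_soc = soc_init if t == 0 else m.esrSOC[t - 1]
    # Charging adds energy at crg_eff; discharging drains SOC at 1/dis_eff.
    return m.esrSOC[t] == prev_soc \
        + m.esrPCrg[t] * crg_eff \
        - m.esrPDis[t] / dis_eff


model.esr_soc_update = pyo.Constraint(model.T, rule=esr_soc_update_rule)
```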
8 changes: 4 additions & 4 deletions nygrid/plots.py
@@ -4,9 +4,9 @@
from typing import List, Tuple, Optional


def plot_gen(thermal_pg: pd.Series,
             gen_hist: pd.Series,
             gen_max: pd.Series,
gen_min: pd.Series,
ax: plt.Axes,
title: Optional[str] = None) -> plt.Axes:
@@ -23,4 +23,4 @@ def plot_gen(thermal_pg: pd.Series,
if title:
ax.set_title(title)

return ax
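Only whitespace changed in `plot_gen`, but its full signature is visible above; a hypothetical call for reference (series contents and figure setup are assumptions, not part of this commit):

```python
import matplotlib.pyplot as plt
import pandas as pd

from nygrid.plots import plot_gen

# Hypothetical hourly series; in practice these come from simulation results.
idx = pd.date_range('2018-01-01', periods=24, freq='h')
thermal_pg = pd.Series(50.0, index=idx)  # dispatched thermal output
gen_hist = pd.Series(48.0, index=idx)    # historical generation
gen_max = pd.Series(80.0, index=idx)     # upper operating limit
gen_min = pd.Series(20.0, index=idx)     # lower operating limit

fig, ax = plt.subplots(figsize=(8, 4))
ax = plot_gen(thermal_pg, gen_hist, gen_max, gen_min, ax, title='Unit 1')
fig.tight_layout()
```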
2 changes: 1 addition & 1 deletion nygrid/postprocessing.py
@@ -3,4 +3,4 @@
Created: 2023-12-26, by Bo Yuan (Cornell University)
Last modified: 2023-12-26, by Bo Yuan (Cornell University)
"""
"""
82 changes: 54 additions & 28 deletions nygrid/preprocessing.py
@@ -9,19 +9,35 @@
import pandas as pd
from typing import List, Tuple, Optional

def agg_demand_county2bus(demand_inc_county: pd.DataFrame,
county2bus: pd.DataFrame) -> pd.DataFrame:

def agg_demand_county2bus(demand_inc_county: pd.DataFrame,
county2bus: pd.DataFrame
) -> pd.DataFrame:
"""
County-level consumption to bus-level consumption
County-level consumption to bus-level consumption.
Parameters
----------
demand_inc_county : pd.DataFrame
County-level consumption
county2bus : pd.DataFrame
County to bus mapping
Returns
-------
demand_inc_bus : pd.DataFrame
Bus-level consumption
"""

demand_inc_county_erie = demand_inc_county['Erie']
demand_inc_county_westchester = demand_inc_county['Westchester']
demand_inc_county_rest = demand_inc_county.drop(columns=['Erie', 'Westchester'])
demand_inc_county_rest = demand_inc_county.drop(
columns=['Erie', 'Westchester'])

county2bus_erie = county2bus[county2bus['NAME'] == 'Erie']
county2bus_westchester = county2bus[county2bus['NAME'] == 'Westchester']
county2bus_rest = county2bus[(county2bus['NAME'] != 'Erie') &
                             (county2bus['NAME'] != 'Westchester')]

demand_inc_bus_erie = demand_inc_county_erie.to_frame()
demand_inc_bus_erie['55'] = demand_inc_bus_erie['Erie'] * 0.5
@@ -33,35 +49,41 @@ def agg_demand_county2bus(demand_inc_county: pd.DataFrame,
demand_inc_bus_westchester = demand_inc_county_westchester.to_frame()
demand_inc_bus_westchester['74'] = demand_inc_bus_westchester['Westchester'] * 0.5
demand_inc_bus_westchester['78'] = demand_inc_bus_westchester['Westchester'] * 0.5
demand_inc_bus_westchester = demand_inc_bus_westchester.drop(columns=['Westchester'])
demand_inc_bus_westchester.columns = demand_inc_bus_westchester.columns.astype(int)
demand_inc_bus_westchester = demand_inc_bus_westchester.drop(columns=[
'Westchester'])
demand_inc_bus_westchester.columns = demand_inc_bus_westchester.columns.astype(
int)

county_bus_alloc_rest = county2bus_rest.set_index('NAME').to_dict()['busIdx']
demand_inc_bus_rest = demand_inc_county_rest.T.groupby(county_bus_alloc_rest).sum().T
county_bus_alloc_rest = county2bus_rest.set_index('NAME').to_dict()[
'busIdx']
demand_inc_bus_rest = demand_inc_county_rest.T.groupby(
county_bus_alloc_rest).sum().T

demand_inc_bus = demand_inc_bus_rest.add(demand_inc_bus_erie, fill_value=0)
demand_inc_bus = demand_inc_bus.add(demand_inc_bus_westchester, fill_value=0)
demand_inc_bus = demand_inc_bus.add(
demand_inc_bus_westchester, fill_value=0)

return demand_inc_bus


def get_res_load_change_county(county_id: str,
upgrade_id: int,
res_bldg_type_list: List[str],
resstock_bldg_proc_dir: str) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
def get_building_load_change_county(county_id: str,
upgrade_id: int,
bldg_type_list: List[str],
bldg_proc_dir: str
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Read building timeseries data aggregated by county and building type
Read building timeseries data aggregated by county and building type.
Parameters
----------
county_id : str
County ID
upgrade_id : int
Upgrade ID
res_bldg_type_list : list
bldg_type_list : list
List of building types
resstock_bldg_proc_dir : str
bldg_proc_dir : str
Directory for processed building data
Returns
@@ -77,19 +99,22 @@ def get_res_load_change_county(county_id: str,
# Read building timeseries data aggregated by county and building type
first_df = True

for bldg_type in res_bldg_type_list:
for bldg_type in bldg_type_list:

filename = os.path.join(resstock_bldg_proc_dir,
filename = os.path.join(bldg_proc_dir,
f"{county_id.lower()}_{bldg_type.replace(' ', '_').lower()}.parquet")

if os.path.isfile(filename):
# Future
df_county_bldg_type_future = pd.read_parquet(filename, engine='pyarrow')
col_total_cons = [col for col in df_county_bldg_type_future.columns if col.endswith('.energy_consumption') and 'total' in col]
df_county_bldg_type_future = pd.read_parquet(
filename, engine='pyarrow')
col_total_cons = [col for col in df_county_bldg_type_future.columns if col.endswith(
'.energy_consumption') and 'total' in col]
df_county_bldg_type_future = df_county_bldg_type_future[col_total_cons]

# Baseline
df_county_bldg_type_base = pd.read_parquet(filename.replace(f'upgrade={upgrade_id}', 'upgrade=0'), engine='pyarrow')
df_county_bldg_type_base = pd.read_parquet(filename.replace(
f'upgrade={upgrade_id}', 'upgrade=0'), engine='pyarrow')
df_county_bldg_type_base = df_county_bldg_type_base[col_total_cons]

# Savings = Baseline - Future
Expand All @@ -105,15 +130,16 @@ def get_res_load_change_county(county_id: str,
df_county_base = df_county_base + df_county_bldg_type_base
df_county_future = df_county_future + df_county_bldg_type_future
df_county_saving = df_county_saving + df_county_bldg_type_saving

else:
print(f'Residential load data is not available for county {county_id} {bldg_type}. Skipping...')
print(
f'Building load data is not available for county {county_id} {bldg_type}. Skipping...')
continue

# Rename columns
col_rename = {col: col.split('.')[1] for col in df_county_base.columns}
df_county_base = df_county_base.rename(columns=col_rename)
df_county_future = df_county_future.rename(columns=col_rename)
df_county_saving = df_county_saving.rename(columns=col_rename)

return df_county_base, df_county_future, df_county_saving
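To make the special-casing in `agg_demand_county2bus` concrete, here is a toy run of the same logic; the bus numbers, counties, and demand values are all invented, and in the real function the mapping comes from `county2bus`, with Westchester handled the same way as Erie:

```python
import pandas as pd

# Hypothetical county-level demand (rows: hours, columns: counties).
demand = pd.DataFrame({'Erie': [10.0, 12.0],
                       'Albany': [4.0, 5.0],
                       'Monroe': [6.0, 7.0]})
county2bus = {'Albany': 52, 'Monroe': 61}  # one bus per ordinary county

# Ordinary counties: relabel columns by bus, summing counties that share one.
bus_rest = demand.drop(columns=['Erie']).T.groupby(county2bus).sum().T

# Erie (and Westchester, in the real function) is split 50/50 over two buses.
bus_erie = pd.DataFrame({55: demand['Erie'] * 0.5,
                         57: demand['Erie'] * 0.5})

demand_bus = bus_rest.add(bus_erie, fill_value=0)
print(demand_bus.columns.tolist())  # [52, 55, 57, 61]
```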