Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Flexible setup of constraints for cooling technologies #242

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
3 changes: 3 additions & 0 deletions message_ix_models/model/water/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,9 @@
# Build
build(context, scen)

# Set scenario as default
scen.set_as_default()

Check warning on line 271 in message_ix_models/model/water/cli.py

View check run for this annotation

Codecov / codecov/patch

message_ix_models/model/water/cli.py#L271

Added line #L271 was not covered by tests

# Solve
scen.solve(solve_options={"lpmethod": "4"}, case=caseName)

Expand Down
163 changes: 127 additions & 36 deletions message_ix_models/model/water/data/water_for_ppl.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Prepare data for water use for cooling & energy technologies."""

from typing import Any
from typing import Any, Literal, Union

import numpy as np
import pandas as pd
Expand All @@ -26,16 +26,22 @@
data_dic = {
"geo_hpl": 1 / 0.850,
"geo_ppl": 1 / 0.385,
"gas_hpl": 1 / 0.3,
"foil_hpl": 1 / 0.25,
"nuc_hc": 1 / 0.326,
"nuc_lc": 1 / 0.326,
"solar_th_ppl": 1 / 0.385,
}

if data_dic.get(x["technology"]):
if pd.notna(x["technology"]) and x["technology"] in data_dic:
value = data_dic.get(x["technology"])
if x["value"] < 1:
value = max(x["value"], value)

Check warning on line 39 in message_ix_models/model/water/data/water_for_ppl.py

View check run for this annotation

Codecov / codecov/patch

message_ix_models/model/water/data/water_for_ppl.py#L37-L39

Added lines #L37 - L39 were not covered by tests
# for backwards compatibility
if x["level"] == "cooling":
return pd.Series((data_dic.get(x["technology"]), "dummy_supply"))
return pd.Series({"value": value, "level": "dummy_supply"})

Check warning on line 42 in message_ix_models/model/water/data/water_for_ppl.py

View check run for this annotation

Codecov / codecov/patch

message_ix_models/model/water/data/water_for_ppl.py#L42

Added line #L42 was not covered by tests
else:
return pd.Series((data_dic.get(x["technology"]), x["level"]))
return pd.Series({"value": value, "level": x["level"]})

Check warning on line 44 in message_ix_models/model/water/data/water_for_ppl.py

View check run for this annotation

Codecov / codecov/patch

message_ix_models/model/water/data/water_for_ppl.py#L44

Added line #L44 was not covered by tests
else:
return pd.Series({"value": x["value"], "level": x["level"]})

Expand Down Expand Up @@ -171,6 +177,103 @@
]


def relax_growth_constraint(
    ref_hist: pd.DataFrame,
    scen,
    cooling_df: pd.DataFrame,
    g_lo: pd.DataFrame,
    constraint_type: Literal["activity", "new_capacity"],
) -> pd.DataFrame:
    """
    Checks if the parent technologies are shut down and require relaxing
    the growth constraint.

    Parameters
    ----------
    ref_hist : pd.DataFrame
        Historical data in the reference scenario.
    scen : Scenario
        Scenario object to retrieve necessary parameters.
    cooling_df : pd.DataFrame
        DataFrame containing information on cooling technologies and their
        parent technologies.
    g_lo : pd.DataFrame
        DataFrame containing growth constraints for each technology.
    constraint_type : {"activity", "new_capacity"}
        Type of constraint to check, either "activity" for operational limits or
        "new_capacity" for capacity expansion limits.

    Returns
    -------
    pd.DataFrame
        Updated `g_lo` DataFrame with relaxed growth constraints.
    """
    year_type = "year_act" if constraint_type == "activity" else "year_vtg"
    bound_param = (
        "bound_activity_up"
        if constraint_type == "activity"
        else "bound_new_capacity_up"
    )

    # Keep, per (node_loc, technology), only the row with the latest
    # historical year; rename for clarity in the merges below.
    max_year_hist = (
        ref_hist.loc[ref_hist.groupby(["node_loc", "technology"])[year_type].idxmax()]
        .drop(columns="unit")
        .rename(columns={year_type: "hist_year", "value": "hist_value"})
    )

    # Upper bounds (activity or new capacity) set on the parent technologies
    bound_up_pare = scen.par(bound_param, {"technology": cooling_df["parent_tech"]})
    # Sorted unique years appearing in the bounds
    years = np.sort(bound_up_pare[year_type].unique())

    # First bound year strictly after the last historical year
    # (None when no later year exists, e.g. single-year test cases)
    max_year_hist["next_year"] = max_year_hist["hist_year"].apply(
        lambda y: years[years > y][0] if any(years > y) else None
    )

    # Merge the max_year_hist with bound_up_pare
    bound_up = pd.merge(bound_up_pare, max_year_hist, how="left")
    # Case 1: in the first year after the historical period the bound drops
    # below 90 % of the last historical value -> growth constraint might bind.
    # If next_year is None (single year test case) bound_up1 is simply empty.
    bound_up1 = bound_up[bound_up[year_type] == bound_up["next_year"]]
    bound_up1 = bound_up1[bound_up1["value"] < 0.9 * bound_up1["hist_value"]]

    # Case 2: sudden drops between consecutive bound years after the start
    bound_up = bound_up.sort_values(by=["node_loc", "technology", year_type])
    # Check if the value for a year is greater than the value of the next year
    bound_up["next_value"] = bound_up.groupby(["node_loc", "technology"])[
        "value"
    ].shift(-1)
    bound_up2 = bound_up[bound_up["value"] > 0.9 * bound_up["next_value"]]
    bound_up2 = bound_up2.drop(columns=["next_value"])

    # Combine both cases, keeping each offending row once
    combined_bound = (
        pd.concat([bound_up1, bound_up2]).drop_duplicates().reset_index(drop=True)
    )
    # Keep only node_loc, technology, and year_type
    combined_bound = combined_bound[["node_loc", "technology", year_type]]
    # Marker column so the matching g_lo rows can be dropped after the merge
    combined_bound["rem"] = "remove"
    combined_bound = combined_bound.rename(columns={"technology": "parent_tech"})

    # Map cooling technologies to their parent technologies; rename on a
    # fresh copy (chained, no inplace) to avoid SettingWithCopyWarning.
    map_parent = cooling_df[["technology_name", "parent_tech"]].rename(
        columns={"technology_name": "technology"}
    )
    # Expand combined_bound to all cooling technologies of each parent
    combined_bound = pd.merge(combined_bound, map_parent, how="left")
    # Rename year_type to year_act, because g_lo uses it
    combined_bound = combined_bound.rename(columns={year_type: "year_act"})

    # Merge into g_lo and drop the flagged technologies
    g_lo = pd.merge(g_lo, combined_bound, how="left")
    g_lo = g_lo[g_lo["rem"] != "remove"]
    # Remove the helper columns rem and parent_tech
    g_lo = g_lo.drop(columns=["rem", "parent_tech"])

    return g_lo


# water & electricity for cooling technologies
@minimum_version("message_ix 3.7")
def cool_tech(context: "Context") -> dict[str, pd.DataFrame]:
Expand Down Expand Up @@ -257,11 +360,9 @@
)
# cooling fraction = H_cool = Hi - 1 - Hi*(h_fg)
# where h_fg (flue gasses losses) = 0.1
ref_input["cooling_fraction"] = ref_input["value"] * 0.9 - 1
# ref_input["cooling_fraction"] = ref_input["value"] * 0.9 - 1 # probably obsolete
adrivinca marked this conversation as resolved.
Show resolved Hide resolved

ref_input[["value", "level"]] = ref_input[["technology", "value", "level"]].apply(
missing_tech, axis=1
)[["value", "level"]]
ref_input[["value", "level"]] = ref_input.apply(missing_tech, axis=1)

# Combines the input df of parent_tech with water withdrawal data
input_cool = (
Expand All @@ -276,18 +377,22 @@
# Convert year values into integers to be compatible with the model
input_cool.year_vtg = input_cool.year_vtg.astype(int)
input_cool.year_act = input_cool.year_act.astype(int)
# Drops extra technologies from the data
# Drops extra technologies from the data. backwards compatibility
input_cool = input_cool[
(input_cool["level"] != "water_supply") & (input_cool["level"] != "cooling")
]

# heat plants need no cooling
input_cool = input_cool[
~input_cool["technology_name"].str.contains("hpl", na=False)
]
input_cool = input_cool[
(input_cool["node_loc"] != f"{context.regions}_GLB")
& (input_cool["node_origin"] != f"{context.regions}_GLB")
]
# Swap node_loc if node_loc equals "{context.regions}_GLB"
input_cool.loc[input_cool["node_loc"] == f"{context.regions}_GLB", "node_loc"] = (
input_cool["node_origin"]
)
# Swap node_origin if node_origin equals "{context.regions}_GLB"
input_cool.loc[
input_cool["node_origin"] == f"{context.regions}_GLB", "node_origin"
] = input_cool["node_loc"]

input_cool["cooling_fraction"] = input_cool.apply(cooling_fr, axis=1)

Expand Down Expand Up @@ -347,7 +452,7 @@
con1 = input_cool["technology_name"].str.endswith("ot_saline", na=False)
con2 = input_cool["technology_name"].str.endswith("air", na=False)
icmse_df = input_cool[(~con1) & (~con2)]

# electricity inputs
inp = make_df(
"input",
node_loc=electr["node_loc"],
Expand Down Expand Up @@ -747,14 +852,11 @@
unit="%",
time="year",
).pipe(broadcast, year_act=info.Y, node_loc=node_region)
# Aligning certain technologies with growth constraints
g_lo.loc[g_lo["technology"].str.contains("bio_ppl|loil_ppl"), "value"] = -0.5
g_lo.loc[g_lo["technology"].str.contains("coal_ppl_u|coal_ppl"), "value"] = -0.5
g_lo.loc[
(g_lo["technology"].str.contains("coal_ppl_u|coal_ppl"))
& (g_lo["node_loc"].str.contains("CPA|PAS")),
"value",
] = -1

# relax growth constraints for activity jumps
g_lo = relax_growth_constraint(ref_hist_act, scen, cooling_df, g_lo, "activity")
# relax growth constraints for capacity jumps
g_lo = relax_growth_constraint(ref_hist_cap, scen, cooling_df, g_lo, "new_capacity")
results["growth_activity_lo"] = g_lo

# growth activity up on saline water
Expand All @@ -769,18 +871,6 @@
).pipe(broadcast, year_act=info.Y, node_loc=node_region)
results["growth_activity_up"] = g_up

# # adding initial activity
# in_lo = h_act.copy()
# in_lo.drop(columns='mode', inplace=True)
# in_lo = in_lo[in_lo['year_act'] == 2015]
# in_lo_1 = make_df('initial_activity_lo',
# node_loc=in_lo['node_loc'],
# technology=in_lo['technology'],
# time='year',
# value=in_lo['value'],
# unit='GWa').pipe(broadcast, year_act=[2015, 2020])
# results['initial_activity_lo'] = in_lo_1

return results


Expand Down Expand Up @@ -839,7 +929,8 @@

n_cool_df = scen.par("output", {"technology": non_cool_tech})
n_cool_df = n_cool_df[
(n_cool_df["node_loc"] != "R11_GLB") & (n_cool_df["node_dest"] != "R11_GLB")
(n_cool_df["node_loc"] != f"{context.regions}_GLB")
& (n_cool_df["node_dest"] != f"{context.regions}_GLB")
]
n_cool_df_merge = pd.merge(n_cool_df, non_cool_df, on="technology", how="right")
n_cool_df_merge.dropna(inplace=True)
Expand Down
11 changes: 6 additions & 5 deletions message_ix_models/model/water/data/water_supply.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
df_sw["MSGREG"] = (
context.map_ISO_c[context.regions]
if context.type_reg == "country"
else f"{context.regions}_" + df_sw["BCU_name"].str[-3:]
else f"{context.regions}_" + df_sw["BCU_name"].str.split("|").str[-1]
)

df_sw = df_sw.set_index(["MSGREG", "BCU_name"])
Expand Down Expand Up @@ -94,10 +94,11 @@
# Reading data, the data is spatially and temporally aggregated from GHMs
df_sw["BCU_name"] = df_x["BCU_name"]

if context.type_reg == "country":
df_sw["MSGREG"] = context.map_ISO_c[context.regions]
else:
df_sw["MSGREG"] = f"{context.regions}_" + df_sw["BCU_name"].str[-3:]
df_sw["MSGREG"] = (

Check warning on line 97 in message_ix_models/model/water/data/water_supply.py

View check run for this annotation

Codecov / codecov/patch

message_ix_models/model/water/data/water_supply.py#L97

Added line #L97 was not covered by tests
context.map_ISO_c[context.regions]
if context.type_reg == "country"
else f"{context.regions}_" + df_sw["BCU_name"].str.split("|").str[-1]
)

df_sw = df_sw.set_index(["MSGREG", "BCU_name"])
df_sw.drop(columns="Unnamed: 0", inplace=True)
Expand Down
1 change: 1 addition & 0 deletions message_ix_models/model/water/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -1113,6 +1113,7 @@ def report(sc: Scenario, reg: str, sdgs: bool = False) -> None:
for ur in ["urban", "rural"]:
# CHANGE TO URBAN AND RURAL POP
pop_tot = sc.timeseries(variable=("Population|" + ur.capitalize()))
# ONLY R11!!! Need to fix when updating the reporting to work with any region
pop_tot = pop_tot[-(pop_tot.region == "GLB region (R11)")]
pop_reg = np.unique(pop_tot["region"])
# need to change names
Expand Down
97 changes: 96 additions & 1 deletion message_ix_models/tests/model/water/data/test_water_for_ppl.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,17 @@
from typing import Literal

import pandas as pd
import pytest
from message_ix import Scenario

from message_ix_models import ScenarioInfo

# from message_ix_models.model.structure import get_codes
from message_ix_models.model.water.data.water_for_ppl import cool_tech, non_cooling_tec
from message_ix_models.model.water.data.water_for_ppl import (
cool_tech,
non_cooling_tec,
relax_growth_constraint,
)


@cool_tech.minimum_version
Expand Down Expand Up @@ -201,3 +207,92 @@ def test_non_cooling_tec(request, test_context):
"year_act",
]
)


# Mock function for scen.par
adrivinca marked this conversation as resolved.
Show resolved Hide resolved
class MockScenario:
    """Minimal stand-in for a Scenario: only the ``par`` accessor is provided."""

    def par(
        self,
        param: Literal["bound_activity_up", "bound_new_capacity_up"],
        filters: dict,
    ) -> pd.DataFrame:
        """Return a fixed bound table, keyed by the year column ``param`` implies."""
        # bound_activity_up is indexed by year_act; the capacity bound by year_vtg
        year_col = "year_act" if param == "bound_activity_up" else "year_vtg"
        records = [
            ("R12_AFR", "coal_ppl", 2030, 30),
            ("R12_AFR", "coal_ppl", 2040, 15),
            ("R12_AFR", "coal_ppl", 2050, 0),
        ]
        return pd.DataFrame(
            records, columns=["node_loc", "technology", year_col, "value"]
        )


@pytest.mark.parametrize(
    "constraint_type, year_type",
    [("activity", "year_act"), ("new_capacity", "year_vtg")],
)
def test_relax_growth_constraint(constraint_type, year_type):
    """g_lo rows whose parent technology hits a bound drop are relaxed (removed)."""
    # Growth constraints: three years of coal cooling plus one gas row
    g_lo = pd.DataFrame.from_records(
        [
            ("R12_AFR", "coal_ppl__ot_fresh", 2030, "year", -0.05, "%"),
            ("R12_AFR", "coal_ppl__ot_fresh", 2040, "year", -0.05, "%"),
            ("R12_AFR", "coal_ppl__ot_fresh", 2050, "year", -0.05, "%"),
            ("R12_AFR", "gas_ppl__ot_fresh", 2030, "year", -0.05, "%"),
        ],
        columns=["node_loc", "technology", "year_act", "time", "value", "unit"],
    )

    # Historical activity/capacity of the parent technology
    ref_hist = pd.DataFrame.from_records(
        [
            ("R12_AFR", "coal_ppl", 2015, "year", 30, "GWa"),
            ("R12_AFR", "coal_ppl", 2020, "year", 50, "GWa"),
            ("R12_AFR", "coal_ppl", 2025, "year", 80, "GWa"),
        ],
        columns=["node_loc", "technology", year_type, "time", "value", "unit"],
    )

    # Cooling-to-parent technology mapping (duplicate rows are harmless)
    cooling_df = pd.DataFrame(
        {
            "technology_name": ["coal_ppl__ot_fresh"] * 3,
            "parent_tech": ["coal_ppl"] * 3,
        }
    )

    # Call the function with the mock scenario and reset the index
    # to make the comparison easier
    result = relax_growth_constraint(
        ref_hist, MockScenario(), cooling_df, g_lo, constraint_type
    ).reset_index(drop=True)

    # The 2030 and 2040 bounds drop sharply versus history, so those coal
    # rows are relaxed; only the 2050 coal row and the gas row remain.
    expected_result = pd.DataFrame(
        {
            "node_loc": ["R12_AFR", "R12_AFR"],
            "technology": ["coal_ppl__ot_fresh", "gas_ppl__ot_fresh"],
            "year_act": [2050, 2030],
            "time": ["year", "year"],
            "value": [-0.05, -0.05],
            "unit": ["%", "%"],
        }
    )

    pd.testing.assert_frame_equal(result, expected_result)
Loading