Chore: Make release 1.0.69
martinroberson authored and DominicCYK committed Mar 18, 2024
1 parent 4baaf77 commit 3f9bd37
Showing 14 changed files with 1,114 additions and 127 deletions.
117 changes: 117 additions & 0 deletions gs_quant/api/gs/scenarios.py
@@ -0,0 +1,117 @@
"""
Copyright 2024 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""

import datetime as dt
import logging
from typing import Dict, List, Tuple

from gs_quant.session import GsSession
from gs_quant.target.risk import Scenario

_logger = logging.getLogger(__name__)


class GsScenarioApi:
"""GS Scenarios API client implementation"""

@classmethod
def create_scenario(cls, scenario: Scenario) -> Scenario:
return GsSession.current._post('/risk/scenarios', scenario, cls=Scenario)

@classmethod
def get_scenario(cls, scenario_id: str) -> Scenario:
return GsSession.current._get(f'/risk/scenarios/{scenario_id}', cls=Scenario)

@classmethod
def get_many_scenarios(cls,
ids: List[str] = None,
names: List[str] = None,
limit: int = 100,
**kwargs) -> Tuple[Scenario]:
url = f'/risk/scenarios?limit={limit}'
if ids:
url += f'&id={"&id=".join(ids)}'
if names:
url += f'&name={"&name=".join(names)}'
if kwargs:
for k, v in kwargs.items():
url += f'&{k}={f"&{k}=".join(v)}' if isinstance(v, list) else f'&{k}={v}'

return GsSession.current._get(url, cls=Scenario).get('results', [])

@classmethod
def get_scenario_by_name(cls, name: str) -> Scenario:
url = f"/risk/scenarios?name={name}"
ret = GsSession.current._get(url, cls=Scenario)
num_found = ret.get('totalResults', 0)

if num_found == 0:
raise ValueError(f'Scenario {name} not found')
elif num_found > 1:
raise ValueError(f'More than one scenario named {name}')
else:
return ret['results'][0]

@classmethod
def update_scenario(cls, scenario: Scenario) -> Dict:
return GsSession.current._put(f'/risk/scenarios/{scenario.get("id")}', scenario, cls=Scenario)

@classmethod
def delete_scenario(cls, scenario_id: str) -> Dict:
return GsSession.current._delete(f'/risk/scenarios/{scenario_id}')

@classmethod
def calculate_scenario(cls, request: Dict) -> Dict:
return GsSession.current._post('/scenarios/calculate', request)


class GsFactorScenarioApi(GsScenarioApi):
def __init__(self):
super().__init__()

@classmethod
def get_many_scenarios(cls,
ids: List[str] = None,
names: List[str] = None,
limit: int = 100,
type: str = None,
risk_model: str = None,
shocked_factors: List[str] = None,
shocked_factor_categories: List[str] = None,
propagated_shocks: bool = None,
start_date: dt.date = None,
end_date: dt.date = None) -> Tuple[Scenario]:
factor_scenario_args = {}
if risk_model:
factor_scenario_args['riskModel'] = risk_model
if type:
factor_scenario_args['factorScenarioType'] = type
if shocked_factors:
factor_scenario_args['shockedFactor'] = shocked_factors
if shocked_factor_categories:
factor_scenario_args['shockedFactorCategory'] = shocked_factor_categories
if propagated_shocks is not None:
factor_scenario_args['propagatedShocks'] = propagated_shocks
if start_date:
factor_scenario_args['historicalSimulationStartDate'] = start_date
if end_date:
factor_scenario_args['historicalSimulationEndDate'] = end_date

return super().get_many_scenarios(ids=ids, names=names, limit=limit, **factor_scenario_args)

@classmethod
def calculate_scenario(cls, calculation_request: Dict) -> Dict:
return super().calculate_scenario(request=calculation_request)
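
For orientation, a minimal usage sketch of the new client (not part of the commit). It assumes an authenticated GsSession; the scenario name and risk model ID below are placeholders.

import datetime as dt

from gs_quant.api.gs.scenarios import GsFactorScenarioApi
from gs_quant.session import Environment, GsSession

GsSession.use(Environment.PROD)  # assumes credentials are already configured

# Look up a single scenario by name; raises ValueError on zero or multiple matches
scenario = GsFactorScenarioApi.get_scenario_by_name("My Historical Scenario")

# Filter factor scenarios by risk model and historical simulation window
scenarios = GsFactorScenarioApi.get_many_scenarios(
    risk_model="RISK_MODEL_ID",
    start_date=dt.date(2023, 1, 1),
    end_date=dt.date(2023, 12, 31))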
8 changes: 8 additions & 0 deletions gs_quant/backtests/generic_engine.py
@@ -40,6 +40,7 @@
from gs_quant.risk import Price
from gs_quant.risk.results import PortfolioRiskResult
from gs_quant.target.backtests import BacktestTradingQuantityType
from gs_quant.common import AssetClass
from gs_quant.tracing import Tracer

# priority set to contexts making requests to the pricing API (min. 1 - max. 10)
@@ -432,6 +433,13 @@ def __init__(self, action_impl_map=None):
}

def get_action_handler(self, action: Action) -> ActionHandler:
def is_eq_underlier(leg):
if hasattr(leg, 'asset_class'):
return isinstance(leg.asset_class, AssetClass) and leg.asset_class == AssetClass.Equity
return leg.__class__.__name__.lower().startswith('eq')
if isinstance(action, EnterPositionQuantityScaledAction) and \
not all([is_eq_underlier(p) for p in action.priceables]):
raise RuntimeError('EnterPositionQuantityScaledAction only supported for equity underliers')
if type(action) in self.action_impl_map:
return self.action_impl_map[type(action)](action)
raise RuntimeError(f'Action {type(action)} not supported by engine')
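The new guard duck-types the underliers: legs exposing an asset_class are checked against AssetClass.Equity, otherwise the class-name prefix is used. A self-contained sketch of that logic (the stub classes below are hypothetical stand-ins for gs_quant instruments):

from enum import Enum

class AssetClass(Enum):  # stand-in for gs_quant.common.AssetClass
    Equity = 'Equity'
    Rates = 'Rates'

class EqOptionStub:      # no asset_class attribute; the 'eq' name prefix marks it as equity
    pass

class IRSwapStub:
    asset_class = AssetClass.Rates   # explicit non-equity asset class

def is_eq_underlier(leg):
    if hasattr(leg, 'asset_class'):
        return isinstance(leg.asset_class, AssetClass) and leg.asset_class == AssetClass.Equity
    return leg.__class__.__name__.lower().startswith('eq')

print(is_eq_underlier(EqOptionStub()))  # True  -> allowed in EnterPositionQuantityScaledAction
print(is_eq_underlier(IRSwapStub()))    # False -> the engine raises RuntimeError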
142 changes: 141 additions & 1 deletion gs_quant/entities/entity.py
@@ -36,6 +36,7 @@
from gs_quant.api.gs.portfolios import GsPortfolioApi
from gs_quant.api.gs.reports import GsReportApi
from gs_quant.api.gs.thematics import ThematicMeasure, GsThematicApi, Region
from gs_quant.api.gs.scenarios import GsFactorScenarioApi
from gs_quant.common import DateLimit, PositionType, Currency
from gs_quant.data import DataCoordinate, DataFrequency, DataMeasure
from gs_quant.data.coordinate import DataDimensions
@@ -44,10 +45,13 @@
from gs_quant.markets.indices_utils import BasketType, IndicesDatasets
from gs_quant.markets.position_set import PositionSet
from gs_quant.markets.report import PerformanceReport, FactorRiskReport, Report, ThematicReport, \
- flatten_results_into_df, get_thematic_breakdown_as_df
+ flatten_results_into_df, get_thematic_breakdown_as_df, ReturnFormat
from gs_quant.markets.scenario import Scenario
from gs_quant.session import GsSession
from gs_quant.target.data import DataQuery
from gs_quant.target.reports import ReportStatus, ReportType
from gs_quant.entities.entity_utils import _explode_data


_logger = logging.getLogger(__name__)

Expand All @@ -63,6 +67,7 @@ class EntityType(Enum):
RISK_MODEL = 'risk_model'
SUBDIVISION = 'subdivision'
DATASET = 'dataset'
SCENARIO = 'scenario'


@dataclass
@@ -75,6 +80,19 @@ class EntityIdentifier(Enum):
pass


class ScenarioCalculationType(Enum):
FACTOR_SCENARIO = "Factor Scenario"


class ScenarioCalculationMeasure(Enum):
SUMMARY = "Summary"
ESTIMATED_FACTOR_PNL = "Factor Pnl"
ESTIMATED_PNL_BY_SECTOR = "By Sector Pnl Aggregations"
ESTIMATED_PNL_BY_REGION = "By Region Pnl Aggregations"
ESTIMATED_PNL_BY_DIRECTION = "By Direction Pnl Aggregations"
ESTIMATED_PNL_BY_ASSET = "By Asset Pnl"


class Entity(metaclass=ABCMeta):
"""Base class for any first-class entity"""
_entity_to_endpoint = {
@@ -879,3 +897,125 @@ def get_thematic_breakdown(self,
:return: a Pandas DataFrame with results
"""
return get_thematic_breakdown_as_df(entity_id=self.id, date=date, basket_id=basket_id)

def get_factor_scenario_analytics(self,
scenarios: List[Scenario],
date: dt.date,
measures: List[ScenarioCalculationMeasure],
risk_model: str = None,
return_format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Dict:

"""Given a list of factor scenarios (historical simulation and/or custom shocks), return the estimated pnl
of the given positioned entity.
:param scenarios: List of factor-based scenarios
:param date: date on which to run the scenarios
:param measures: which metrics to return
:param risk_model: valid risk model ID
:param return_format: whether to return data formatted in a dataframe or as a dict
:return: a dict of scenario calculation results, keyed by result type (DataFrame values) or keyed by scenario when ReturnFormat.JSON is requested
**Examples**
>>> import datetime as dt
>>> from gs_quant.session import GsSession, Environment
>>> from gs_quant.markets.portfolio_manager import PortfolioManager, ReturnFormat
>>> from gs_quant.entities.entity import ScenarioCalculationMeasure, PositionedEntity
>>> from gs_quant.markets.scenario import Scenario
Get scenarios
>>> covid_19_omicron = Scenario.get_by_name("Covid 19 Omicron (v2)") # historical simulation
>>> custom_shock = Scenario.get_by_name("Shocking factor by x% (Propagated)") # custom shock
>>> risk_model = "RISK_MODEL_ID" # valid risk model ID
Instantiate your PositionedEntity. Here, we use one of its subclasses, PortfolioManager.
>>> pm = PortfolioManager(portfolio_id="PORTFOLIO_ID")
Set the date you wish to run your scenario on
>>> date = dt.date(2023, 3, 7)
Run the scenarios and get their estimated impact on your positioned entity
>>> scenario_analytics = pm.get_factor_scenario_analytics(
... scenarios=[covid_19_omicron, custom_shock],
... date=date,
... measures=[ScenarioCalculationMeasure.SUMMARY,
... ScenarioCalculationMeasure.ESTIMATED_FACTOR_PNL,
... ScenarioCalculationMeasure.ESTIMATED_PNL_BY_SECTOR,
... ScenarioCalculationMeasure.ESTIMATED_PNL_BY_REGION,
... ScenarioCalculationMeasure.ESTIMATED_PNL_BY_DIRECTION,
... ScenarioCalculationMeasure.ESTIMATED_PNL_BY_ASSET],
... risk_model=risk_model)
By default, results are returned as a dict keyed by result type (summary, factor PnL, sector/region/direction
aggregations, by-asset PnL), with each value formatted as a DataFrame. To get the raw calculation results keyed
by scenario instead, set return_format to ReturnFormat.JSON.
"""
risk_report = self.get_factor_risk_report(risk_model_id=risk_model)

id_to_scenario_map = {scenario.id: scenario for scenario in scenarios}
scenario_ids = list(id_to_scenario_map.keys())

calculation_request = {
"date": date,
"scenarioIds": scenario_ids,
"reportId": risk_report.id,
"measures": [m.value for m in measures],
"riskModel": risk_model,
"type": "Factor Scenario"
}

results = GsFactorScenarioApi.calculate_scenario(calculation_request)

scenarios = [id_to_scenario_map.get(sc_id) for sc_id in results.get('scenarios')]
calculation_results = results.get('results')

if return_format == ReturnFormat.JSON:
return dict(zip(scenarios, calculation_results))

result = {}

all_data = {result_type: [] for result_type in ['summary', 'factorPnl', 'bySectorAggregations',
'byRegionAggregations', 'byDirectionAggregations', 'byAsset']}

for i, calc_result in enumerate(calculation_results):
for result_type in ['summary', 'factorPnl', 'bySectorAggregations',
'byRegionAggregations', 'byDirectionAggregations', 'byAsset']:
scenario_metadata_map = {"scenarioId": scenarios[i].id,
"scenarioName": scenarios[i].name,
"scenarioType": scenarios[i].scenario_type}
if result_type == 'summary':
calc_result.get(result_type).update(scenario_metadata_map)
all_data.get(result_type).append(calc_result.get(result_type))
else:
[data_map.update(scenario_metadata_map) for data_map in calc_result.get(result_type, [])]
[all_data.get(result_type).append(element) for element in calc_result.get(result_type, [])]

for result_type, result_label in {"summary": "summary",
"factorPnl": "factorCategories",
"bySectorAggregations": "sectors",
"byRegionAggregations": "countries",
"byDirectionAggregations": "direction",
"byAsset": "byAsset"}.items():
estimated_pnl_results_as_json = all_data.get(result_type)
if estimated_pnl_results_as_json:
estimated_pnl_df = pd.DataFrame.from_dict(estimated_pnl_results_as_json)
estimated_pnl_df = estimated_pnl_df.apply(
_explode_data, axis=1, parent_label=result_label)
if isinstance(estimated_pnl_df, pd.Series):
estimated_pnl_df = pd.concat(estimated_pnl_df.values, ignore_index=True)

estimated_pnl_df.columns = estimated_pnl_df.columns.map(lambda x: {
"factorCategories": "factorCategory",
"factors": "factor",
"sectors": "sector",
"countries": "country",
"industries": "industry"
}.get(x, x))

result[result_type] = estimated_pnl_df

return result
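
A sketch of how the DataFrame output might be consumed, continuing the docstring example above. The dict keys mirror the result types built in this method; the 'estimatedPnl' column is assumed to be present in the API response.

summary_df = scenario_analytics['summary']        # one row per scenario, tagged with scenarioId/scenarioName
factor_pnl_df = scenario_analytics['factorPnl']   # factor-level rows; 'factorCategories' is renamed to 'factorCategory'

# Estimated PnL per factor category and scenario, assuming an 'estimatedPnl' column
pivot = factor_pnl_df.pivot_table(index='factorCategory', columns='scenarioName',
                                  values='estimatedPnl', aggfunc='sum')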
57 changes: 57 additions & 0 deletions gs_quant/entities/entity_utils.py
@@ -0,0 +1,57 @@
"""
Copyright 2024 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""

from typing import Union

import pandas as pd


def _explode_data(data: pd.Series,
parent_label: str) -> Union[pd.DataFrame, pd.Series]:
parent_to_child_map = {
"factorCategories": "factors",
"factors": "byAsset",
"sectors": "industries",
"industries": None,
"countries": None,
"direction": None
}

labels_to_ignore_map = {
"factorCategories": ["factorExposure", "estimatedPnl", "factors"],
"factors": ["factorExposure", "estimatedPnl", "byAsset"],
"sectors": ["exposure", "estimatedPnl", "industries"],
"industries": [],
"countries": [],
"direction": [],
"byAsset": []
}

data = data.rename({'name': parent_label}) if parent_label in parent_to_child_map.keys() else data
child_label = parent_to_child_map.get(parent_label)

if child_label and child_label in data.index.values:
child_df = pd.DataFrame(data[child_label])
child_df = child_df.apply(_explode_data, axis=1, parent_label=child_label)

data = data.drop(labels=labels_to_ignore_map.get(parent_label))
if isinstance(child_df, pd.Series):
child_df = pd.concat(child_df.values, ignore_index=True)
child_df = child_df.assign(**data.to_dict())

return child_df

return data
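
A small illustration of what _explode_data does to one nested record (the values below are made up; the field names follow the maps above):

import pandas as pd

row = pd.Series({
    "name": "Style",                  # renamed to 'factorCategories' via the parent label
    "factorExposure": 120.0,
    "estimatedPnl": -3.5,
    "factors": [                      # child records are flattened into rows
        {"name": "Momentum", "factorExposure": 80.0, "estimatedPnl": -2.0},
        {"name": "Value", "factorExposure": 40.0, "estimatedPnl": -1.5},
    ],
})

flat = _explode_data(row, parent_label="factorCategories")
# -> a 2-row DataFrame with columns: factors, factorExposure, estimatedPnl, factorCategories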
4 changes: 2 additions & 2 deletions gs_quant/markets/factor.py
@@ -138,8 +138,8 @@ def volatility(self,
limit_factors=False).get('results')

volatility_data_df = build_factor_volatility_dataframe(volatility_raw_data, True, None) * 252
- if format == ReturnFormat.DATA_FRAME:
- return volatility_data_df.to_dict()
+ if format == ReturnFormat.JSON:
+ return volatility_data_df.squeeze(axis=1).to_dict()

return volatility_data_df
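
The behavioral difference between the old and new branch comes down to plain pandas: to_dict() on a single-column frame nests the values under the column name, while squeeze(axis=1).to_dict() returns a flat date-to-value mapping. An illustration with made-up numbers and an illustrative column name:

import pandas as pd

vol = pd.DataFrame({"volatility": [0.18, 0.21]},
                   index=pd.to_datetime(["2024-01-02", "2024-01-03"]))

vol.to_dict()                  # {'volatility': {Timestamp('2024-01-02'): 0.18, ...}}  (nested)
vol.squeeze(axis=1).to_dict()  # {Timestamp('2024-01-02'): 0.18, Timestamp('2024-01-03'): 0.21}  (flat)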
