From 3f9bd37f2284e42d957e50e757bf455ac33985c3 Mon Sep 17 00:00:00 2001 From: "Roberson, Martin [GBM Public]" Date: Mon, 18 Mar 2024 17:31:08 +0000 Subject: [PATCH] Chore: Make release 1.0.69 --- gs_quant/api/gs/scenarios.py | 117 +++++++ gs_quant/backtests/generic_engine.py | 8 + gs_quant/entities/entity.py | 142 ++++++++- gs_quant/entities/entity_utils.py | 57 ++++ gs_quant/markets/factor.py | 4 +- gs_quant/markets/scenario.py | 293 ++++++++++++++++++ gs_quant/target/backtests.py | 3 +- gs_quant/target/risk.py | 117 +++++++ gs_quant/target/scenarios.py | 94 ++++++ .../test/timeseries/test_measures_fx_vol.py | 152 ++++----- .../test/timeseries/test_measures_reports.py | 112 +++++++ gs_quant/timeseries/measures_fx_vol.py | 49 +-- gs_quant/timeseries/measures_reports.py | 90 +++++- gs_quant/timeseries/measures_risk_models.py | 3 +- 14 files changed, 1114 insertions(+), 127 deletions(-) create mode 100644 gs_quant/api/gs/scenarios.py create mode 100644 gs_quant/entities/entity_utils.py create mode 100644 gs_quant/markets/scenario.py create mode 100644 gs_quant/target/scenarios.py diff --git a/gs_quant/api/gs/scenarios.py b/gs_quant/api/gs/scenarios.py new file mode 100644 index 00000000..d350b923 --- /dev/null +++ b/gs_quant/api/gs/scenarios.py @@ -0,0 +1,117 @@ +""" +Copyright 2024 Goldman Sachs. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. 
+"""
+
+import datetime as dt
+import logging
+from typing import Dict, List, Tuple
+
+from gs_quant.session import GsSession
+from gs_quant.target.risk import Scenario
+
+_logger = logging.getLogger(__name__)
+
+
+class GsScenarioApi:
+    """GS Scenarios API client implementation"""
+
+    @classmethod
+    def create_scenario(cls, scenario: Scenario) -> Scenario:
+        return GsSession.current._post('/risk/scenarios', scenario, cls=Scenario)
+
+    @classmethod
+    def get_scenario(cls, scenario_id: str) -> Scenario:
+        return GsSession.current._get(f'/risk/scenarios/{scenario_id}', cls=Scenario)
+
+    @classmethod
+    def get_many_scenarios(cls,
+                           ids: List[str] = None,
+                           names: List[str] = None,
+                           limit: int = 100,
+                           **kwargs) -> Tuple[Scenario]:
+        url = f'/risk/scenarios?limit={limit}'
+        if ids:
+            url += f'&id={"&id=".join(ids)}'
+        if names:
+            url += f'&name={"&name=".join(names)}'
+        if kwargs:
+            for k, v in kwargs.items():
+                url += f'&{k}={f"&{k}=".join(v)}' if isinstance(v, list) else f'&{k}={v}'
+
+        return GsSession.current._get(url, cls=Scenario).get('results', [])
+
+    @classmethod
+    def get_scenario_by_name(cls, name: str) -> Scenario:
+        url = f"/risk/scenarios?name={name}"
+        ret = GsSession.current._get(url, cls=Scenario)
+        num_found = ret.get('totalResults', 0)
+
+        if num_found == 0:
+            raise ValueError(f'Scenario {name} not found')
+        elif num_found > 1:
+            raise ValueError(f'More than one scenario named {name}')
+        else:
+            return ret['results'][0]
+
+    @classmethod
+    def update_scenario(cls, scenario: Scenario) -> Dict:
+        return GsSession.current._put(f'/risk/scenarios/{scenario.get("id")}', scenario, cls=Scenario)
+
+    @classmethod
+    def delete_scenario(cls, scenario_id: str) -> Dict:
+        return GsSession.current._delete(f'/risk/scenarios/{scenario_id}')
+
+    @classmethod
+    def calculate_scenario(cls, request: Dict) -> Dict:
+        return GsSession.current._post('/scenarios/calculate', request)
+
+
+class GsFactorScenarioApi(GsScenarioApi):
+    def __init__(self):
+        super().__init__()
+
+    
@classmethod + def get_many_scenarios(cls, + ids: List[str] = None, + names: List[str] = None, + limit: int = 100, + type: str = None, + risk_model: str = None, + shocked_factors: List[str] = None, + shocked_factor_categories: List[str] = None, + propagated_shocks: bool = None, + start_date: dt.date = None, + end_date: dt.date = None) -> Tuple[Scenario]: + factor_scenario_args = {} + if risk_model: + factor_scenario_args['riskModel'] = risk_model + if type: + factor_scenario_args['factorScenarioType'] = type + if shocked_factors: + factor_scenario_args['shockedFactor'] = shocked_factors + if shocked_factor_categories: + factor_scenario_args['shockedFactorCategory'] = shocked_factor_categories + if propagated_shocks: + factor_scenario_args['propagatedShocks'] = propagated_shocks + if start_date: + factor_scenario_args['historicalSimulationStartDate'] = start_date + if end_date: + factor_scenario_args['historicalSimulationEndDate'] = end_date + + return super().get_many_scenarios(ids=ids, names=names, limit=limit, **factor_scenario_args) + + @classmethod + def calculate_scenario(cls, calculation_request: Dict) -> Dict: + return super().calculate_scenario(request=calculation_request) diff --git a/gs_quant/backtests/generic_engine.py b/gs_quant/backtests/generic_engine.py index ffb335a4..ec31540b 100644 --- a/gs_quant/backtests/generic_engine.py +++ b/gs_quant/backtests/generic_engine.py @@ -40,6 +40,7 @@ from gs_quant.risk import Price from gs_quant.risk.results import PortfolioRiskResult from gs_quant.target.backtests import BacktestTradingQuantityType +from gs_quant.common import AssetClass from gs_quant.tracing import Tracer # priority set to contexts making requests to the pricing API (min. 1 - max. 
10) @@ -432,6 +433,13 @@ def __init__(self, action_impl_map=None): } def get_action_handler(self, action: Action) -> ActionHandler: + def is_eq_underlier(leg): + if hasattr(leg, 'asset_class'): + return isinstance(leg.asset_class, AssetClass) and leg.asset_class == AssetClass.Equity + return leg.__class__.__name__.lower().startswith('eq') + if isinstance(action, EnterPositionQuantityScaledAction) and \ + not all([is_eq_underlier(p) for p in action.priceables]): + raise RuntimeError('EnterPositionQuantityScaledAction only supported for equity underliers') if type(action) in self.action_impl_map: return self.action_impl_map[type(action)](action) raise RuntimeError(f'Action {type(action)} not supported by engine') diff --git a/gs_quant/entities/entity.py b/gs_quant/entities/entity.py index 2a3dba86..ffa33787 100644 --- a/gs_quant/entities/entity.py +++ b/gs_quant/entities/entity.py @@ -36,6 +36,7 @@ from gs_quant.api.gs.portfolios import GsPortfolioApi from gs_quant.api.gs.reports import GsReportApi from gs_quant.api.gs.thematics import ThematicMeasure, GsThematicApi, Region +from gs_quant.api.gs.scenarios import GsFactorScenarioApi from gs_quant.common import DateLimit, PositionType, Currency from gs_quant.data import DataCoordinate, DataFrequency, DataMeasure from gs_quant.data.coordinate import DataDimensions @@ -44,10 +45,13 @@ from gs_quant.markets.indices_utils import BasketType, IndicesDatasets from gs_quant.markets.position_set import PositionSet from gs_quant.markets.report import PerformanceReport, FactorRiskReport, Report, ThematicReport, \ - flatten_results_into_df, get_thematic_breakdown_as_df + flatten_results_into_df, get_thematic_breakdown_as_df, ReturnFormat +from gs_quant.markets.scenario import Scenario from gs_quant.session import GsSession from gs_quant.target.data import DataQuery from gs_quant.target.reports import ReportStatus, ReportType +from gs_quant.entities.entity_utils import _explode_data + _logger = logging.getLogger(__name__) @@ -63,6 
+67,7 @@ class EntityType(Enum): RISK_MODEL = 'risk_model' SUBDIVISION = 'subdivision' DATASET = 'dataset' + SCENARIO = 'scenario' @dataclass @@ -75,6 +80,19 @@ class EntityIdentifier(Enum): pass +class ScenarioCalculationType(Enum): + FACTOR_SCENARIO = "Factor Scenario" + + +class ScenarioCalculationMeasure(Enum): + SUMMARY = "Summary" + ESTIMATED_FACTOR_PNL = "Factor Pnl" + ESTIMATED_PNL_BY_SECTOR = "By Sector Pnl Aggregations" + ESTIMATED_PNL_BY_REGION = "By Region Pnl Aggregations" + ESTIMATED_PNL_BY_DIRECTION = "By Direction Pnl Aggregations" + ESTIMATED_PNL_BY_ASSET = "By Asset Pnl" + + class Entity(metaclass=ABCMeta): """Base class for any first-class entity""" _entity_to_endpoint = { @@ -879,3 +897,125 @@ def get_thematic_breakdown(self, :return: a Pandas DataFrame with results """ return get_thematic_breakdown_as_df(entity_id=self.id, date=date, basket_id=basket_id) + + def get_factor_scenario_analytics(self, + scenarios: List[Scenario], + date: dt.date, + measures: List[ScenarioCalculationMeasure], + risk_model: str = None, + return_format: ReturnFormat = ReturnFormat.DATA_FRAME) -> \ + Union[Dict, Union[Dict, pd.DataFrame]]: + + """Given a list of factor scenarios (historical simulation and/or custom shocks), return the estimated pnl + of the given positioned entity. + :param scenarios: List of factor-based scenarios + :param date: date to run scenarios. 
+        :param measures: which metrics to return
+        :param risk_model: valid risk model ID
+        :param return_format: whether to return data formatted in a dataframe or as a dict
+
+        **Examples**
+
+        >>> from gs_quant.session import GsSession, Environment
+        >>> from gs_quant.markets.portfolio_manager import PortfolioManager, ReturnFormat
+        >>> from gs_quant.entities.entity import ScenarioCalculationMeasure, PositionedEntity
+        >>> from gs_quant.markets.scenario import Scenario
+
+        Get scenarios
+        >>> covid_19_omicron = Scenario.get_by_name("Covid 19 Omicron (v2)") # historical simulation
+        >>> custom_shock = Scenario.get_by_name("Shocking factor by x% (Propagated)") # custom shock
+        >>> risk_model = "RISK_MODEL_ID" # valid risk model ID
+
+        Instantiate your PositionedEntity. Here, we are using one of its subclasses, PortfolioManager
+
+        >>> pm = PortfolioManager(portfolio_id="PORTFOLIO_ID")
+
+        Set the date you wish to run your scenario on
+
+        >>> date = dt.date(2023, 3, 7)
+
+        Run scenario and get estimated impact on your positioned entity
+
+        >>> scenario_analytics = pm.get_factor_scenario_analytics(
+        ...     scenarios=[covid_19_omicron, custom_shock],
+        ...     date=date,
+        ...     measures=[ScenarioCalculationMeasure.SUMMARY,
+        ...               ScenarioCalculationMeasure.ESTIMATED_FACTOR_PNL,
+        ...               ScenarioCalculationMeasure.ESTIMATED_PNL_BY_SECTOR,
+        ...               ScenarioCalculationMeasure.ESTIMATED_PNL_BY_REGION,
+        ...               ScenarioCalculationMeasure.ESTIMATED_PNL_BY_DIRECTION,
+        ...               ScenarioCalculationMeasure.ESTIMATED_PNL_BY_ASSET],
+        ...     risk_model=risk_model)
+
+        By default, the result will be returned in a dict with keys as the measures/metrics requested and values as
+        the scenario calculation results formatted in a dataframe. 
To get the results in a dict, specify the return + format as JSON + """ + risk_report = self.get_factor_risk_report(risk_model_id=risk_model) + + id_to_scenario_map = {scenario.id: scenario for scenario in scenarios} + scenario_ids = list(id_to_scenario_map.keys()) + + calculation_request = { + "date": date, + "scenarioIds": scenario_ids, + "reportId": risk_report.id, + "measures": [m.value for m in measures], + "riskModel": risk_model, + "type": "Factor Scenario" + } + + results = GsFactorScenarioApi.calculate_scenario(calculation_request) + + scenarios = [id_to_scenario_map.get(sc_id) for sc_id in results.get('scenarios')] + calculation_results = results.get('results') + + if return_format == ReturnFormat.JSON: + return dict(zip(scenarios, calculation_results)) + + result = {} + + all_data = {} + [all_data.update({result_type: []}) for result_type in ['summary', 'factorPnl', 'bySectorAggregations', + 'byRegionAggregations', 'byDirectionAggregations', + 'byAsset']] + + for i, calc_result in enumerate(calculation_results): + all_data.get('summary').append(calc_result.get('summary')) + for result_type in ['summary', 'factorPnl', 'bySectorAggregations', + 'byRegionAggregations', 'byDirectionAggregations', 'byAsset']: + scenario_metadata_map = {"scenarioId": scenarios[i].id, + "scenarioName": scenarios[i].name, + "scenarioType": scenarios[i].scenario_type} + if result_type == 'summary': + calc_result.get(result_type).update(scenario_metadata_map) + all_data.get(result_type).append(calc_result.get(result_type)) + else: + [data_map.update(scenario_metadata_map) for data_map in calc_result.get(result_type, [])] + [all_data.get(result_type).append(element) for element in calc_result.get(result_type, [])] + + for result_type, result_label in {"summary": "summary", + "factorPnl": "factorCategories", + "bySectorAggregations": "sectors", + "byRegionAggregations": "countries", + "byDirectionAggregations": "direction", + "byAsset": "byAsset"}.items(): + 
estimated_pnl_results_as_json = all_data.get(result_type) + if estimated_pnl_results_as_json: + estimated_pnl_df = pd.DataFrame.from_dict(estimated_pnl_results_as_json) + estimated_pnl_df = estimated_pnl_df.apply( + _explode_data, axis=1, parent_label=result_label) + if isinstance(estimated_pnl_df, pd.Series): + estimated_pnl_df = pd.concat(estimated_pnl_df.values, ignore_index=True) + + estimated_pnl_df.columns = estimated_pnl_df.columns.map(lambda x: { + "factorCategories": "factorCategory", + "factors": "factor", + "sectors": "sector", + "countries": "country", + "industries": "industry" + }.get(x, x)) + + result[result_type] = estimated_pnl_df + + return result diff --git a/gs_quant/entities/entity_utils.py b/gs_quant/entities/entity_utils.py new file mode 100644 index 00000000..564657cf --- /dev/null +++ b/gs_quant/entities/entity_utils.py @@ -0,0 +1,57 @@ +""" +Copyright 2024 Goldman Sachs. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. 
+""" + +from typing import Union + +import pandas as pd + + +def _explode_data(data: pd.Series, + parent_label: str) -> Union[pd.DataFrame, pd.Series]: + parent_to_child_map = { + "factorCategories": "factors", + "factors": "byAsset", + "sectors": "industries", + "industries": None, + "countries": None, + "direction": None + } + + labels_to_ignore_map = { + "factorCategories": ["factorExposure", "estimatedPnl", "factors"], + "factors": ["factorExposure", "estimatedPnl", "byAsset"], + "sectors": ["exposure", "estimatedPnl", "industries"], + "industries": [], + "countries": [], + "direction": [], + "byAsset": [] + } + + data = data.rename({'name': parent_label}) if parent_label in parent_to_child_map.keys() else data + child_label = parent_to_child_map.get(parent_label) + + if child_label and child_label in data.index.values: + child_df = pd.DataFrame(data[child_label]) + child_df = child_df.apply(_explode_data, axis=1, parent_label=child_label) + + data = data.drop(labels=labels_to_ignore_map.get(parent_label)) + if isinstance(child_df, pd.Series): + child_df = pd.concat(child_df.values, ignore_index=True) + child_df = child_df.assign(**data.to_dict()) + + return child_df + + return data diff --git a/gs_quant/markets/factor.py b/gs_quant/markets/factor.py index c68c6255..fa9c16e4 100644 --- a/gs_quant/markets/factor.py +++ b/gs_quant/markets/factor.py @@ -138,8 +138,8 @@ def volatility(self, limit_factors=False).get('results') volatility_data_df = build_factor_volatility_dataframe(volatility_raw_data, True, None) * 252 - if format == ReturnFormat.DATA_FRAME: - return volatility_data_df.to_dict() + if format == ReturnFormat.JSON: + return volatility_data_df.squeeze(axis=1).to_dict() return volatility_data_df diff --git a/gs_quant/markets/scenario.py b/gs_quant/markets/scenario.py new file mode 100644 index 00000000..031118b4 --- /dev/null +++ b/gs_quant/markets/scenario.py @@ -0,0 +1,293 @@ +""" +Copyright 2024 Goldman Sachs. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. +""" + +from gs_quant.api.gs.scenarios import GsFactorScenarioApi +from gs_quant.markets.factor import Factor +from gs_quant.target.risk import Scenario as TargetScenario, FactorScenarioType +from enum import Enum +from typing import List, Union, Dict +from pydash import get + +import datetime as dt +import pandas as pd + + +class ScenarioCalculationType(Enum): + FACTOR_SCENARIO = "Factor Scenario" + + +class ScenarioResultsMode(Enum): + POSITIONED_ENTITY = "Positioned Entity" + POSITIONS = "Positions" + + +class FactorShock: + """ Marquee Factor Shock """ + + def __init__(self, + factor: Union[str, Factor], + shock: float): + self.__factor = factor + self.__shock = shock + + @property + def factor(self) -> Union[str, Factor]: + """ Get factor being shocked""" + return self.__factor + + @factor.setter + def factor(self, factor: Union[str, Factor]): + self.__factor = factor + + @property + def shock(self) -> float: + """ Get factor being shocked""" + return self.__shock + + @shock.setter + def shock(self, shock: float): + self.__shock = shock + + def to_dict(self): + return { + "factor": self.factor.name if isinstance(self.factor, Factor) else self.factor, + "shock": self.shock + } + + @classmethod + def from_dict(cls, obj): + return FactorShock(factor=obj.get("factor"), shock=obj.get("shock")) + + +class FactorShockParameters: + def __init__(self, + factor_shocks: List[FactorShock] = None, + propagate_shocks: bool = None, + risk_model: str = 
None): + self.__factor_shocks = factor_shocks + self.__propagate_shocks = propagate_shocks + self.__risk_model = risk_model + + @property + def factor_shocks(self) -> List[FactorShock]: + return self.__factor_shocks + + @factor_shocks.setter + def factor_shocks(self, factor_shocks: Union[List[FactorShock], Dict, pd.DataFrame]): + if isinstance(factor_shocks, pd.DataFrame): + factor_shocks_as_dict = factor_shocks.to_dict(orient='split') + self.__factor_shocks = [FactorShock(factor=f, shock=s) for f, s in + zip(factor_shocks_as_dict.get('columns'), factor_shocks_as_dict.get('data'))] + elif isinstance(factor_shocks, Dict): + self.__factor_shocks = [FactorShock(factor=k, shock=v) for k, v in factor_shocks.items()] + else: + self.__factor_shocks = factor_shocks + + @property + def propagate_shocks(self) -> bool: + return self.__propagate_shocks + + @propagate_shocks.setter + def propagate_shocks(self, propagate_shocks: bool): + self.__propagate_shocks = propagate_shocks + + @property + def risk_model(self) -> str: + return self.__risk_model + + @classmethod + def from_dict(cls, obj: Dict) -> 'FactorShockParameters': + return cls(factor_shocks=[FactorShock.from_dict(f_shock) for f_shock in obj.get('factorShocks')], + risk_model=obj.get("riskModel"), + propagate_shocks=obj.get("propagateShocks")) + + def to_dict(self) -> Dict: + return { + "riskModel": self.risk_model, + "propagateShocks": self.propagate_shocks, + "factorShocks": [f_shock.to_dict() for f_shock in self.factor_shocks] + } + + +class HistoricalSimulationParameters: + def __init__(self, + start_date: dt.date = None, + end_date: dt.date = None): + self.__start_date = start_date + self.__end_date = end_date + + @property + def start_date(self) -> dt.date: + return self.__start_date + + @start_date.setter + def start_date(self, start_date: dt.date): + self.__start_date = start_date + + @property + def end_date(self) -> dt.date: + return self.__end_date + + @end_date.setter + def end_date(self, end_date: 
dt.date):
+        self.__end_date = end_date
+
+    @classmethod
+    def from_dict(cls, obj: Dict) -> 'HistoricalSimulationParameters':
+        return cls(start_date=dt.datetime.strptime(obj.get('startDate'), "%Y-%m-%d").date(),
+                   end_date=dt.datetime.strptime(obj.get('endDate'), "%Y-%m-%d").date())
+
+    def to_dict(self) -> Dict:
+        return {"startDate": self.start_date, "endDate": self.end_date}
+
+
+ScenarioParameters = Union[FactorShockParameters, HistoricalSimulationParameters]
+
+
+class FactorScenario:
+    """ Marquee Factor-based Scenario """
+
+    def __init__(self,
+                 name: str,
+                 scenario_type: Union[str, FactorScenarioType],
+                 parameters: Union[Dict, HistoricalSimulationParameters, FactorShockParameters],
+                 id_: str = None,
+                 description: str = None,
+                 tags: List[str] = None):
+        self.__id = id_
+        self.__name = name
+        self.__scenario_type = scenario_type
+        self.__description = description
+        self.__parameters = parameters \
+            if any([isinstance(parameters, FactorShockParameters),
+                    isinstance(parameters, HistoricalSimulationParameters)]) \
+            else FactorShockParameters.from_dict(parameters) if scenario_type == FactorScenarioType.Factor_Shock \
+            else HistoricalSimulationParameters.from_dict(parameters)
+        self.__tags = tags
+
+    @property
+    def id(self) -> str:
+        return self.__id
+
+    @property
+    def name(self) -> str:
+        return self.__name
+
+    @name.setter
+    def name(self, name: str):
+        self.__name = name
+
+    @property
+    def scenario_type(self) -> Union[str, FactorScenarioType]:
+        return self.__scenario_type
+
+    @property
+    def description(self) -> str:
+        return self.__description
+
+    @description.setter
+    def description(self, description: str):
+        self.__description = description
+
+    @property
+    def parameters(self) -> ScenarioParameters:
+        return self.__parameters
+
+    @parameters.setter
+    def parameters(self, parameters: ScenarioParameters):
+        self.__parameters = parameters
+
+    @property
+    def tags(self) -> List[str]:
+        return self.__tags
+
+    @tags.setter
+    def tags(self, tags: 
List[str]): + self.__tags = tags + + @classmethod + def from_dict(cls, scenario_as_dict: Dict) -> 'FactorScenario': + scenario_data = { + "name": scenario_as_dict.get('name'), + "description": scenario_as_dict.get('description'), + "id_": scenario_as_dict.get('id'), + "scenario_type": scenario_as_dict.get('type'), + "parameters": get(scenario_as_dict, 'parameters', None), + "tags": scenario_as_dict.get('tags') + } + return cls(**scenario_data) + + @classmethod + def from_target(cls, target_scenario: TargetScenario): + scenario = cls(id_=target_scenario.id, + name=target_scenario.name, + scenario_type=target_scenario.type, + parameters=target_scenario.parameters, + description=target_scenario.description, + tags=target_scenario.tags) + + return scenario + + @classmethod + def get(cls, scenario_id: str): + scenario = GsFactorScenarioApi.get_scenario(scenario_id) + return cls.from_target(scenario) + + @classmethod + def get_by_name(cls, scenario_name: str): + scenario = GsFactorScenarioApi.get_scenario_by_name(scenario_name) + return cls.from_target(scenario) + + @classmethod + def get_many(cls, + ids: List[str] = None, + names: List[str] = None, + type: Union[str, FactorScenarioType] = None, + risk_model: str = None, + shocked_factors: List[str] = None, + shocked_factor_categories: List[str] = None, + propagated_shocks: bool = None, + start_date: dt.date = None, + end_date: dt.date = None) -> List['FactorScenario']: + many_scenarios_as_dict = GsFactorScenarioApi.get_many_scenarios( + ids, + names=names, + type=type, + risk_model=risk_model, + shocked_factors=shocked_factors, + shocked_factor_categories=shocked_factor_categories, + propagated_shocks=propagated_shocks, + start_date=start_date, + end_date=end_date) + + return [cls.from_target(scenario_as_target) for scenario_as_target in many_scenarios_as_dict] + + def save(self): + """ Update factor scenario or Create it if it does not exist""" + target_scenario = TargetScenario(name=self.name, + 
type_=self.scenario_type, + description=self.description, + parameters=self.parameters.to_dict(), + tags=tuple(self.tags)) + + if self.id: + target_scenario.id_ = self.id + GsFactorScenarioApi.update_scenario(target_scenario) + else: + GsFactorScenarioApi.create_scenario(target_scenario) + + +Scenario = Union[FactorScenario] diff --git a/gs_quant/target/backtests.py b/gs_quant/target/backtests.py index 9e3ddfba..6edd8f49 100644 --- a/gs_quant/target/backtests.py +++ b/gs_quant/target/backtests.py @@ -37,7 +37,8 @@ class BacktestRiskMeasureType(EnumBase, Enum): Strike_Level = 'Strike Level' Spot = 'Spot' Price_ATMS = 'Price ATMS' - Price_ATMF_Volatility = 'Price ATMF Volatility' + Price_ATMF_Volatility = 'Price ATMF Volatility' + Barrier_Level = 'Barrier Level' class BacktestTradingQuantityType(EnumBase, Enum): diff --git a/gs_quant/target/risk.py b/gs_quant/target/risk.py index 565b37bc..5cfbd4e2 100644 --- a/gs_quant/target/risk.py +++ b/gs_quant/target/risk.py @@ -33,6 +33,14 @@ class FactorRiskTableMode(EnumBase, Enum): Mctr = 'Mctr' +class FactorScenarioType(EnumBase, Enum): + + """Marquee factor scenario type""" + + Factor_Shock = 'Factor Shock' + Factor_Historical_Simulation = 'Factor Historical Simulation' + + class OptimizationStatus(EnumBase, Enum): """Optimization status.""" @@ -90,6 +98,24 @@ class ExecutionCostForHorizon(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class FactorHistoricalSimulationScenarioParameters(Base): + start_date: datetime.date = field(default=None, metadata=field_metadata) + end_date: datetime.date = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class FactorShock(Base): + factor: str = field(default=None, 
metadata=field_metadata) + shock: float = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + @handle_camel_case_args @dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) @@ -391,6 +417,29 @@ class RiskAtHorizon(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ScenarioGetManyRequestPathSchema(Base): + id_: Optional[Tuple[str, ...]] = field(default=None, metadata=config(field_name='id', exclude=exclude_none)) + limit: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + offset: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + propagate_shocks: Optional[Tuple[bool, ...]] = field(default=None, metadata=field_metadata) + short_name: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + tags: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + factor_scenario_type: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + historical_simulation_end_date: Optional[Tuple[datetime.date, ...]] = field(default=None, metadata=field_metadata) + last_updated_by_id: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + created_by_id: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + shocked_factor: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + owner_id: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + name: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + description: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + historical_simulation_start_date: Optional[Tuple[datetime.date, ...]] = field(default=None, metadata=field_metadata) + shocked_factor_category: Optional[Tuple[str, ...]] 
= field(default=None, metadata=field_metadata) + scenario_group_id: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + + @handle_camel_case_args @dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) @@ -402,6 +451,15 @@ class TradeCompleteAtHorizon(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class FactorShockScenarioParameters(Base): + factor_shocks: Tuple[FactorShock, ...] = field(default=None, metadata=field_metadata) + propagate_shocks: bool = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + @handle_camel_case_args @dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) @@ -486,6 +544,22 @@ class OptimizationPortfolioCharacteristics(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ShockScopeFilter(Base): + region: Optional[Region] = field(default=None, metadata=field_metadata) + asset_id: Optional[str] = field(default=None, metadata=field_metadata) + gics_sector: Optional[str] = field(default=None, metadata=field_metadata) + country_code: Optional[CountryCode] = field(default=None, metadata=field_metadata) + gics_industry_group: Optional[str] = field(default=None, metadata=field_metadata) + gics_industry: Optional[str] = field(default=None, metadata=field_metadata) + gics_sub_industry: Optional[str] = field(default=None, metadata=field_metadata) + is_investment_grade: Optional[bool] = field(default=None, metadata=field_metadata) + tenor: Optional[str] = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + @handle_camel_case_args 
@dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) @@ -547,6 +621,16 @@ class OptimizationTradeSchedule(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ShockScope(Base): + asset_class: Optional[AssetClass] = field(default=None, metadata=field_metadata) + type_: Optional[str] = field(default=None, metadata=config(field_name='type', exclude=exclude_none)) + filter_: Optional[ShockScopeFilter] = field(default=None, metadata=config(field_name='filter', exclude=exclude_none)) + name: Optional[str] = field(default=None, metadata=name_metadata) + + @handle_camel_case_args @dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) @@ -605,6 +689,16 @@ class OptimizationAnalytics(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class Shock(Base): + scope: ShockScope = field(default=None, metadata=field_metadata) + value: float = field(default=None, metadata=field_metadata) + type_: str = field(default=None, metadata=config(field_name='type', exclude=exclude_none)) + name: Optional[str] = field(default=None, metadata=name_metadata) + + @handle_camel_case_args @dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) @@ -623,6 +717,29 @@ class OptimizationResult(Base): name: Optional[str] = field(default=None, metadata=name_metadata) +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class Scenario(Base): + name: str = field(default=None, metadata=field_metadata) + shocks: Optional[Tuple[Shock, ...]] = field(default=None, metadata=field_metadata) + created_by_id: Optional[str] = field(default=None, metadata=field_metadata) + created_time: 
Optional[datetime.datetime] = field(default=None, metadata=field_metadata) + description: Optional[str] = field(default=None, metadata=field_metadata) + entitlements: Optional[Entitlements] = field(default=None, metadata=field_metadata) + entitlement_exclusions: Optional[EntitlementExclusions] = field(default=None, metadata=field_metadata) + id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none)) + identifiers: Optional[Tuple[Identifier, ...]] = field(default=None, metadata=field_metadata) + last_updated_by_id: Optional[str] = field(default=None, metadata=field_metadata) + last_updated_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata) + scenario_group_id: Optional[str] = field(default=None, metadata=field_metadata) + short_name: Optional[str] = field(default=None, metadata=field_metadata) + owner_id: Optional[str] = field(default=None, metadata=field_metadata) + tags: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + type_: Optional[FactorScenarioType] = field(default=None, metadata=config(field_name='type', exclude=exclude_none)) + parameters: Optional[DictBase] = field(default=None, metadata=field_metadata) + + @handle_camel_case_args @dataclass_json(letter_case=LetterCase.CAMEL) @dataclass(unsafe_hash=True, repr=False) diff --git a/gs_quant/target/scenarios.py b/gs_quant/target/scenarios.py new file mode 100644 index 00000000..99b92a66 --- /dev/null +++ b/gs_quant/target/scenarios.py @@ -0,0 +1,94 @@ +""" +Copyright 2019 Goldman Sachs. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. 
See the License for the +specific language governing permissions and limitations +under the License. +""" + +from gs_quant.base import * +from gs_quant.common import * +import datetime +from typing import Dict, Optional, Tuple, Union +from dataclasses import dataclass, field +from dataclasses_json import LetterCase, config, dataclass_json + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ByAssetPnlResult(Base): + asset_id: str = field(default=None, metadata=field_metadata) + name: str = field(default=None, metadata=field_metadata) + bbid: Optional[str] = field(default=None, metadata=field_metadata) + sector: Optional[str] = field(default=None, metadata=field_metadata) + industry: Optional[str] = field(default=None, metadata=field_metadata) + country: Optional[str] = field(default=None, metadata=field_metadata) + direction: Optional[str] = field(default=None, metadata=field_metadata) + exposure: Optional[float] = field(default=None, metadata=field_metadata) + estimated_pnl: Optional[float] = field(default=None, metadata=field_metadata) + estimated_performance: Optional[float] = field(default=None, metadata=field_metadata) + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ErroredScenario(Base): + id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none)) + error_message: Optional[str] = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class SummaryResult(Base): + estimated_pnl: float = field(default=None, metadata=field_metadata) + estimated_performance: Optional[float] = field(default=None, metadata=field_metadata) + net_exposure_post_scenario: Optional[float] = field(default=None, 
metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class PnlResult(Base): + name: str = field(default=None, metadata=field_metadata) + estimated_pnl: float = field(default=None, metadata=field_metadata) + factor_exposure: Optional[float] = field(default=None, metadata=field_metadata) + exposure: Optional[float] = field(default=None, metadata=field_metadata) + factor_shock: Optional[float] = field(default=None, metadata=field_metadata) + by_asset: Optional[ByAssetPnlResult] = field(default=None, metadata=field_metadata) + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ScenarioResponse(Base): + summary: Optional[SummaryResult] = field(default=None, metadata=field_metadata) + factor_pnl: Optional[PnlResult] = field(default=None, metadata=field_metadata) + by_sector_aggregations: Optional[PnlResult] = field(default=None, metadata=field_metadata) + by_region_aggregations: Optional[PnlResult] = field(default=None, metadata=field_metadata) + by_direction_aggregations: Optional[PnlResult] = field(default=None, metadata=field_metadata) + by_asset: Optional[ByAssetPnlResult] = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + +@handle_camel_case_args +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass(unsafe_hash=True, repr=False) +class ScenarioCalculationResponse(Base): + scenarios: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata) + errored_scenarios: Optional[Tuple[ErroredScenario, ...]] = field(default=None, metadata=field_metadata) + results: Optional[Tuple[ScenarioResponse, ...]] = field(default=None, metadata=field_metadata) + name: Optional[str] = field(default=None, metadata=name_metadata) + + diff --git 
a/gs_quant/test/timeseries/test_measures_fx_vol.py b/gs_quant/test/timeseries/test_measures_fx_vol.py index 44645a66..4b98c440 100644 --- a/gs_quant/test/timeseries/test_measures_fx_vol.py +++ b/gs_quant/test/timeseries/test_measures_fx_vol.py @@ -33,6 +33,7 @@ from gs_quant.test.timeseries.utils import mock_request from gs_quant.timeseries import Currency, Cross, Bond, CurrencyEnum, SecurityMaster from gs_quant.timeseries.measures_fx_vol import _currencypair_to_tdapi_fxo_asset, _currencypair_to_tdapi_fxfwd_asset +from gs_quant.timeseries.measures_helper import VolReference _index = [pd.Timestamp('2021-03-30')] _test_datasets = ('TEST_DATASET',) @@ -343,79 +344,6 @@ def test_fwd_points(mocker): replace.restore() -def test_fx_vol_measure_legacy(mocker): - replace = Replacer() - args = dict(tenor='1m', strike_reference=tm_fxo.VolReference('delta_neutral'), relative_strike=0, - location=None, legacy_implementation=True) - - mock_gbp = Cross('MA26QSMPX9990G66', 'GBPUSD') - args['asset'] = mock_gbp - expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='impliedVolatility') - expected.dataset_ids = _test_datasets - mocker.patch.object(tm_rates, 'get_historical_and_last_for_measure', - return_value=expected) - mocker.patch.object(tm_rates, '_extract_series_from_df', - return_value=expected) - xrefs = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock()) - xrefs.return_value = 'MA26QSMPX9990G66' - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - replace.restore() - - args['legacy_implementation'] = False - xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) - xrefs.return_value = None - with pytest.raises(MqValueError): - tm_fxo.implied_volatility_fxvol(**args) - - xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) - xrefs.return_value = 'GBPUSD' - xrefs = replace('gs_quant.timeseries.measures._cross_stored_direction_helper', Mock()) - 
xrefs.return_value = 'GBPUSD' - mocker.patch.object(tm_fxo, 'implied_volatility_new', - return_value=expected) - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - - xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) - xrefs.return_value = 'USDUSD' - xrefs = replace('gs_quant.timeseries.measures_fx_vol._cross_stored_direction_helper', Mock()) - xrefs.return_value = 'GBPUSD' - assets = replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()) - assets.return_value = mock_gbp - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - - args['strike_reference'] = tm_fxo.VolReference('delta_call') - args['relative_strike'] = 25 - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - - args['strike_reference'] = tm_fxo.VolReference('delta_put') - args['relative_strike'] = 25 - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - - args['strike_reference'] = tm_fxo.VolReference('spot') - args['relative_strike'] = 100 - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - - args['strike_reference'] = tm_fxo.VolReference('forward') - args['relative_strike'] = 100 - actual = tm_fxo.implied_volatility_fxvol(**args) - assert_series_equal(expected, actual) - - args['strike_reference'] = tm_fxo.VolReference('normalized') - args['relative_strike'] = 100 - xrefs = replace('gs_quant.timeseries.measures_fx_vol._preprocess_implied_vol_strikes_fx', Mock()) - xrefs.return_value = ['normalized', 0] - with pytest.raises(MqValueError): - tm_fxo.implied_volatility_fxvol(**args) - - replace.restore() - - def mock_df(): d = { 'strikeVol': [5, 1, 2], @@ -522,5 +450,83 @@ def test_vol_swap_strike(): replace.restore() +def test_implied_volatility_fxvol(mocker): + replace = Replacer() + + args = dict(tenor="1y", location=None) + mock_eur = Cross('MAGZMXVM0J282ZTR', 
'EURUSD') + args['asset'] = mock_eur + + xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) + xrefs.return_value = 'EURUSD' + xrefs = replace('gs_quant.timeseries.measures_fx_vol._cross_stored_direction_helper', Mock()) + xrefs.return_value = 'GBPUSD' + assets = replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()) + assets.return_value = mock_eur + + preprocess_impl_vol = replace('gs_quant.timeseries.measures_fx_vol._preprocess_implied_vol_strikes_fx', Mock()) + replace('gs_quant.timeseries.measures_fx_vol._get_tdapi_fxo_assets', Mock()) + mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None)) + expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='impliedVolatility') + + args['strike_reference'] = VolReference.DELTA_CALL + args['relative_strike'] = 50 + preprocess_impl_vol.return_value = ['abc', 50] + with pytest.raises(MqValueError): + tm_fxo.implied_volatility_fxvol(**args) + + args['strike_reference'] = VolReference.DELTA_CALL + args['relative_strike'] = 50 + preprocess_impl_vol.return_value = ['delta', 50] + actual = tm_fxo.implied_volatility_fxvol(**args) + assert_series_equal(expected, actual) + + args['strike_reference'] = VolReference.DELTA_PUT + args['relative_strike'] = 25 + preprocess_impl_vol.return_value = ['delta', -25] + actual = tm_fxo.implied_volatility_fxvol(**args) + assert_series_equal(expected, actual) + + args['strike_reference'] = VolReference.DELTA_CALL + args['relative_strike'] = 0 + preprocess_impl_vol.return_value = ['delta', 0] + actual = tm_fxo.implied_volatility_fxvol(**args) + assert_series_equal(expected, actual) + + args['strike_reference'] = VolReference.SPOT + args['relative_strike'] = 100 + preprocess_impl_vol.return_value = ['spot', 100] + actual = tm_fxo.implied_volatility_fxvol(**args) + assert_series_equal(expected, actual) + + args['strike_reference'] = VolReference.FORWARD + args['relative_strike'] = 100 + preprocess_impl_vol.return_value 
= ['forward', 100] + actual = tm_fxo.implied_volatility_fxvol(**args) + assert_series_equal(expected, actual) + + args['strike_reference'] = VolReference.NORMALIZED + args['relative_strike'] = 100 + preprocess_impl_vol = replace('gs_quant.timeseries.measures_fx_vol._preprocess_implied_vol_strikes_fx', Mock()) + preprocess_impl_vol.return_value = ['normalized', 0] + with pytest.raises(MqValueError): + tm_fxo.implied_volatility_fxvol(**args) + + xrefs = replace('gs_quant.timeseries.measures_fx_vol._cross_stored_direction_helper', Mock()) + xrefs.return_value = 'EURUSD' + args['strike_reference'] = VolReference.DELTA_CALL + args['relative_strike'] = 50 + preprocess_impl_vol.return_value = ['delta', 50] + actual = tm_fxo.implied_volatility_fxvol(**args) + assert_series_equal(expected, actual) + + xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) + xrefs.return_value = None + with pytest.raises(MqValueError): + tm_fxo.implied_volatility_fxvol(**args) + + replace.restore() + + if __name__ == '__main__': pytest.main(args=["test_measures_fx_vol.py"]) diff --git a/gs_quant/test/timeseries/test_measures_reports.py b/gs_quant/test/timeseries/test_measures_reports.py index e800c612..45d2063e 100644 --- a/gs_quant/test/timeseries/test_measures_reports.py +++ b/gs_quant/test/timeseries/test_measures_reports.py @@ -15,6 +15,7 @@ """ import copy import datetime +import math import pandas import pandas as pd @@ -117,6 +118,71 @@ } ] + +factor_exposure_data = [ + { + 'date': '2024-03-14', + 'reportId': 'report_id', + 'factor': 'Factor_1', + 'factorCategory': 'Style', + 'pnl': 19.23, + 'exposure': 100, + 'proportionOfRisk': 1 + }, + { + 'date': '2024-03-14', + 'reportId': 'report_id', + 'factor': 'Factor_2', + 'factorCategory': 'Style', + 'pnl': 14.24, + 'exposure': 100, + 'proportionOfRisk': 2 + }, + { + 'date': '2024-03-14', + 'reportId': 'report_id', + 'factor': 'Factor_3', + 'factorCategory': 'Style', + 'pnl': 21.25, + 'exposure': 100, + 
'proportionOfRisk': 3 + } +] + + +factor_return_data = [ + { + "date": "2024-03-13", + "factor": "Factor_1", + "return": 2 + }, + { + "date": "2024-03-13", + "factor": "Factor_2", + "return": 3 + }, + { + "date": "2024-03-13", + "factor": "Factor_3", + "return": 2 + }, + { + "date": "2024-03-14", + "factor": "Factor_1", + "return": 2 + }, + { + "date": "2024-03-14", + "factor": "Factor_2", + "return": 3 + }, + { + "date": "2024-03-14", + "factor": "Factor_3", + "return": 2 + }, +] + aggregate_factor_data = [ { 'date': '2020-11-23', @@ -1140,5 +1206,51 @@ def test_pnl_percent(): replace.restore() +def test_historical_simulation_estimated_pnl(): + replace = Replacer() + + # mock getting report + mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock()) + mock.return_value = factor_risk_report + + # mock getting report factor data + mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock()) + mock.return_value = factor_exposure_data + + # mock sending data request + mock = replace('gs_quant.api.gs.data.GsDataApi.execute_query', Mock()) + mock.return_value = {"data": factor_return_data} + + with DataContext(datetime.date(2024, 3, 13), datetime.date(2024, 3, 14)): + actual = mr.historical_simulation_estimated_pnl('report_id') + actual_values = list(actual.values) + assert all([math.isclose(x, actual_values[i]) for i, x in enumerate([7.0, 14.17])]) + + replace.restore() + + +def test_historical_simulation_estimated_factor_attribution(): + replace = Replacer() + + # mock getting report + mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock()) + mock.return_value = factor_risk_report + + # mock getting report factor data + mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock()) + mock.return_value = factor_exposure_data + + # mock sending data request + mock = replace('gs_quant.api.gs.data.GsDataApi.execute_query', Mock()) + mock.return_value = {"data": [data for data in 
factor_return_data if data['factor'] == 'Factor_1']} + + with DataContext(datetime.date(2024, 3, 13), datetime.date(2024, 3, 14)): + actual = mr.historical_simulation_estimated_factor_attribution('report_id', "Factor_1") + actual_values = list(actual.values) + assert all([math.isclose(x, actual_values[i]) for i, x in enumerate([2.0, 4.04])]) + + replace.restore() + + if __name__ == '__main__': pytest.main(args=[__file__]) diff --git a/gs_quant/timeseries/measures_fx_vol.py b/gs_quant/timeseries/measures_fx_vol.py index 53660055..a6faf633 100644 --- a/gs_quant/timeseries/measures_fx_vol.py +++ b/gs_quant/timeseries/measures_fx_vol.py @@ -23,13 +23,11 @@ from gs_quant.api.gs.assets import GsAssetApi from gs_quant.api.gs.data import QueryType, GsDataApi -from gs_quant.data.log import log_debug from gs_quant.errors import MqValueError from gs_quant.markets.securities import AssetIdentifier, Asset, SecurityMaster from gs_quant.target.common import AssetClass, AssetType, PricingLocation from gs_quant.timeseries import ASSET_SPEC, plot_measure, MeasureDependency from gs_quant.timeseries import ExtendedSeries, measures_rates as tm_rates -from gs_quant.timeseries import measures as tm from gs_quant.timeseries.measures import _asset_from_spec, _market_data_timed, _cross_stored_direction_helper, \ _preprocess_implied_vol_strikes_fx, _tenor_month_to_year from gs_quant.timeseries.measures_helper import VolReference @@ -553,43 +551,6 @@ def implied_volatility_new(asset: Asset, expiry_tenor: str, strike: str, option_ return series -""" -Legacy implementation -""" - - -def legacy_implied_volatility(asset: Asset, tenor: str, strike_reference: VolReference = None, - relative_strike: Real = None, *, source: str = None, real_time: bool = False, - request_id: Optional[str] = None) -> Series: - """ - Volatility of an asset implied by observations of market prices. 
- - :param asset: asset object loaded from security master - :param tenor: relative date representation of expiration date e.g. 1m - or absolute calendar strips e.g. 'Cal20', 'F20-G20' - :param strike_reference: reference for strike level - :param relative_strike: strike relative to reference - :param source: name of function caller - :param real_time: whether to retrieve intraday data instead of EOD - :param request_id: service request id, if any - :return: implied volatility curve - """ - - asset_id = tm.cross_stored_direction_for_fx_vol(asset.get_marquee_id()) - ref_string, relative_strike = tm._preprocess_implied_vol_strikes_fx(strike_reference, relative_strike) - - log_debug(request_id, _logger, 'where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, ref_string, - relative_strike) - tenor = tm._tenor_month_to_year(tenor) - where = dict(tenor=tenor, strikeReference=ref_string, relativeStrike=relative_strike) - # Parallel calls when fetching / appending last results - df = tm.get_historical_and_last_for_measure([asset_id], QueryType.IMPLIED_VOLATILITY, where, source=source, - real_time=real_time, request_id=request_id) - - s = tm._extract_series_from_df(df, QueryType.IMPLIED_VOLATILITY) - return s - - """ New Implementation """ @@ -602,8 +563,7 @@ def legacy_implied_volatility(asset: Asset, tenor: str, strike_reference: VolRef def implied_volatility_fxvol(asset: Asset, tenor: str, strike_reference: VolReference = None, relative_strike: Real = None, location: Optional[PricingLocation] = None, legacy_implementation: bool = False, *, - source: str = None, real_time: bool = False, - request_id: Optional[str] = None) -> Series: + source: str = None, real_time: bool = False) -> Series: """ Volatility of an asset implied by observations of market prices. 
@@ -613,17 +573,12 @@ def implied_volatility_fxvol(asset: Asset, tenor: str, strike_reference: VolRefe :param strike_reference: reference for strike level :param relative_strike: strike relative to reference :param location: location of the data snapshot Example - "HKG", "LDN", "NYC" - :param legacy_implementation: use FX_IVOl over FX_VANILLA_OPTIONS_VOLS + :param legacy_implementation: Deprecated (supplied values are ignored) :param source: name of function caller :param real_time: whether to retrieve intraday data instead of EOD - :param request_id: service request id, if any :return: implied volatility curve """ - if legacy_implementation: - return (legacy_implied_volatility(asset, tenor, strike_reference, relative_strike, - source=source, real_time=real_time, request_id=request_id)) - bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID) if bbid is not None: cross = _cross_stored_direction_helper(bbid) diff --git a/gs_quant/timeseries/measures_reports.py b/gs_quant/timeseries/measures_reports.py index 38d99e5d..32dfcc17 100644 --- a/gs_quant/timeseries/measures_reports.py +++ b/gs_quant/timeseries/measures_reports.py @@ -15,14 +15,15 @@ """ import datetime as dt from enum import Enum -from typing import Optional +from typing import Optional, Union, List import pandas as pd import numpy as np +import math from pandas.tseries.offsets import BDay from pydash import decapitalize -from gs_quant.api.gs.data import QueryType +from gs_quant.api.gs.data import QueryType, GsDataApi, DataQuery from gs_quant.data.core import DataContext from gs_quant.datetime import prev_business_date from gs_quant.entities.entity import EntityType @@ -339,6 +340,91 @@ def pnl(report_id: str, unit: str = 'Notional', *, source: str = None, return pd.Series(pnl_df['pnl'], name="pnl") +@plot_measure_entity(EntityType.REPORT) +def historical_simulation_estimated_pnl(report_id: str, *, source: str = None, + real_time: bool = False, request_id: Optional[str] = None) -> pd.Series: + """ + 
Estimated Pnl from replaying a historical simulation scenario on your latest positions + :param report_id: id of performance report + :param source: name of function caller + :param real_time: whether to retrieve intraday data instead of EOD + :param request_id: server request id + :return: portfolio estimated pnl + """ + + factor_attributed_pnl = _replay_historical_factor_moves_on_latest_positions(report_id, []) + total_factor_attributed_pnl = factor_attributed_pnl.apply(np.sum, axis=1).to_frame("estimatedPnl") + + total_factor_attributed_pnl.index = pd.to_datetime(total_factor_attributed_pnl.index) + return total_factor_attributed_pnl.squeeze() + + +@plot_measure_entity(EntityType.REPORT) +def historical_simulation_estimated_factor_attribution(report_id: str, factor_name: str, *, source: str = None, + real_time: bool = False, + request_id: Optional[str] = None) -> pd.Series: + """ + Estimated Pnl attributed to the factor after replaying a historical simulation scenario on a portfolio's latest + positions + :param report_id: id of performance report + :param factor_name: name of the factor + :param source: name of function caller + :param real_time: whether to retrieve intraday data instead of EOD + :param request_id: server request id + :return: portfolio estimated pnl + """ + + factor_attributed_pnl = _replay_historical_factor_moves_on_latest_positions(report_id, [factor_name]) + factor_attributed_pnl.index = pd.to_datetime(factor_attributed_pnl.index) + + return factor_attributed_pnl.squeeze() + + +def _replay_historical_factor_moves_on_latest_positions(report_id: str, factors: List[str]) -> \ + Union[pd.Series, pd.DataFrame]: + start_date = DataContext.current.start_time.date() + end_date = DataContext.current.end_time.date() + risk_report = FactorRiskReport.get(report_id) + risk_model_id = risk_report.get_risk_model_id() + + # Get data in batches of 365 days + date_range = pd.bdate_range(start_date, end_date) + batches = np.array_split([d.date() for d in 
date_range.tolist()], math.ceil(len(date_range.tolist()) / 365)) + data_query_results = [] + query = {"riskModel": risk_model_id} + if factors: + query.update({"factor": factors}) + for batch in batches: + data_query_results += GsDataApi.execute_query( + 'RISK_MODEL_FACTOR', + DataQuery( + where=query, + start_date=batch[0], + end_date=batch[-1] + ) + ).get('data', []) + + return_data = pd.DataFrame(data_query_results).pivot(columns="factor", index="date", values="return").sort_index() + return_data_aggregated = (return_data / 100).apply(geometrically_aggregate) + + latest_report_date = risk_report.latest_end_date + factor_exposures = risk_report.get_results( + start_date=latest_report_date, + end_date=latest_report_date, + return_format=ReturnFormat.JSON + ) + factor_exposure_df = pd.DataFrame(factor_exposures).pivot(columns="factor", + index="date", + values="exposure").sort_index() + + factor_exposure_df = factor_exposure_df.reindex(columns=return_data_aggregated.columns) + factor_attributed_pnl_values = return_data_aggregated.values * factor_exposure_df.values + factor_attributed_pnl = pd.DataFrame(factor_attributed_pnl_values, index=return_data_aggregated.index, + columns=return_data_aggregated.columns) + + return factor_attributed_pnl + + def _get_factor_data(report_id: str, factor_name: str, query_type: QueryType, unit: Unit = Unit.NOTIONAL) -> pd.Series: # Check params report = FactorRiskReport.get(report_id) diff --git a/gs_quant/timeseries/measures_risk_models.py b/gs_quant/timeseries/measures_risk_models.py index a1c8fdb5..00baa584 100644 --- a/gs_quant/timeseries/measures_risk_models.py +++ b/gs_quant/timeseries/measures_risk_models.py @@ -22,7 +22,8 @@ from gs_quant.data.core import DataContext from gs_quant.entities.entity import EntityType from gs_quant.markets.securities import Asset, AssetIdentifier -from gs_quant.models.risk_model import FactorRiskModel, ReturnFormat, MarqueeRiskModel +from gs_quant.models.risk_model import FactorRiskModel, 
MarqueeRiskModel +from gs_quant.markets.factor import ReturnFormat from gs_quant.target.common import AssetClass, AssetType from gs_quant.target.risk_models import RiskModelDataMeasure, RiskModelDataAssetsRequest, \ RiskModelUniverseIdentifierRequest