From 75d9ac15f06fde319ffc915c8916f7b81e7a624e Mon Sep 17 00:00:00 2001
From: "Horimbere, Brice [GMD]"
Date: Tue, 25 Apr 2023 11:16:53 -0500
Subject: [PATCH] Gs Quant release 0.9.102

---
 gs_quant/analytics/processors/__init__.py     |   3 +-
 gs_quant/backtests/triggers.py                |  22 +++-
 gs_quant/models/risk_model_utils.py           |  44 ++-----
 gs_quant/test/models/test_risk_model.py       | 123 +++++++++++++++++-
 gs_quant/test/timeseries/test_backtesting.py  |  30 +++++
 .../test/timeseries/test_measures_fx_vol.py   |  25 +++-
 gs_quant/test/utils/mock_request.py           |  10 +-
 gs_quant/timeseries/backtesting.py            |  21 +--
 gs_quant/timeseries/measures.py               |  16 ++-
 gs_quant/timeseries/measures_fx_vol.py        | 120 ++++++++++-------
 10 files changed, 309 insertions(+), 105 deletions(-)

diff --git a/gs_quant/analytics/processors/__init__.py b/gs_quant/analytics/processors/__init__.py
index d1fcf68b..9a763283 100644
--- a/gs_quant/analytics/processors/__init__.py
+++ b/gs_quant/analytics/processors/__init__.py
@@ -19,7 +19,8 @@
     ReturnsProcessor, BetaProcessor, FXImpliedCorrProcessor
 from .special_processors import EntityProcessor, CoordinateProcessor
 from .statistics_processors import PercentilesProcessor, PercentileProcessor, StdMoveProcessor, \
-    CovarianceProcessor, ZscoresProcessor, MeanProcessor, VarianceProcessor, SumProcessor, StdDevProcessor
+    CovarianceProcessor, ZscoresProcessor, MeanProcessor, VarianceProcessor, SumProcessor, StdDevProcessor, \
+    CompoundGrowthRate
 from .utility_processors import LastProcessor, AppendProcessor, AdditionProcessor, SubtractionProcessor, \
     MultiplicationProcessor, DivisionProcessor, MinProcessor, MaxProcessor, NthLastProcessor, OneDayProcessor
 from .scale_processors import ScaleProcessor, BarMarkerProcessor, SpotMarkerProcessor, ScaleShape

diff --git a/gs_quant/backtests/triggers.py b/gs_quant/backtests/triggers.py
index 80b2b03a..eb55f1f0 100644
--- a/gs_quant/backtests/triggers.py
+++ b/gs_quant/backtests/triggers.py
@@ -20,8 +20,10 @@
 from gs_quant.backtests.actions import Action, AddTradeAction, AddTradeActionInfo
 from gs_quant.backtests.backtest_objects import BackTest, PredefinedAssetBacktest
 from gs_quant.backtests.backtest_utils import make_list, CalcType
-from gs_quant.datetime.relative_date import RelativeDateSchedule
 from gs_quant.backtests.data_sources import *
+from gs_quant.datetime.relative_date import RelativeDateSchedule
+from gs_quant.risk.transform import Transformer
+from gs_quant.risk import RiskMeasure


 class TriggerDirection(Enum):
@@ -41,7 +43,8 @@ def __init__(self):


 class PeriodicTriggerRequirements(TriggerRequirements):
-    def __init__(self, start_date=None, end_date=None, frequency=None, calendar=None):
+    def __init__(self, start_date: dt.date = None, end_date: dt.date = None, frequency: str = None,
+                 calendar: str = None):
         super().__init__()
         self.start_date = start_date
         self.end_date = end_date
@@ -50,7 +53,7 @@ def __init__(self, start_date=None, end_date=None, frequency=None, calendar=None):


 class IntradayTriggerRequirements(TriggerRequirements):
-    def __init__(self, start_time, end_time, frequency):
+    def __init__(self, start_time: dt.datetime, end_time: dt.datetime, frequency: str):
         super().__init__()
         self.start_time = start_time
         self.end_time = end_time
@@ -58,7 +61,7 @@


 class MktTriggerRequirements(TriggerRequirements):
-    def __init__(self, data_source, trigger_level, direction):
+    def __init__(self, data_source: DataSource, trigger_level: float, direction: TriggerDirection):
         super().__init__()
         self.data_source = data_source
         self.trigger_level = trigger_level
@@ -66,11 +69,13 @@ def __init__(self, data_source, trigger_level, direction):


 class RiskTriggerRequirements(TriggerRequirements):
-    def __init__(self, risk, trigger_level, direction):
+    def __init__(self, risk: RiskMeasure, trigger_level: float, direction: TriggerDirection,
+                 risk_transformation: Optional[Transformer] = None):
         super().__init__()
         self.risk = risk
         self.trigger_level = trigger_level
         self.direction = direction
+        self.risk_transformation = risk_transformation


 class AggregateTriggerRequirements(TriggerRequirements):
@@ -258,7 +263,12 @@ def __init__(self,
         self._risks += [trigger_requirements.risk]

     def has_triggered(self, state: dt.date, backtest: BackTest = None) -> TriggerInfo:
-        risk_value = backtest.results[state][self._trigger_requirements.risk].aggregate()
+        if self.trigger_requirements.risk_transformation is None:
+            risk_value = backtest.results[state][self._trigger_requirements.risk].aggregate()
+        else:
+            risk_value = backtest.results[state][self._trigger_requirements.risk].transform(
+                risk_transformation=self.trigger_requirements.risk_transformation).aggregate(
+                allow_mismatch_risk_keys=True)
         if self._trigger_requirements.direction == TriggerDirection.ABOVE:
             if risk_value > self._trigger_requirements.trigger_level:
                 return TriggerInfo(True)
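Note on the trigger change: with risk_transformation set, has_triggered now transforms the day's risk results before aggregating (with allow_mismatch_risk_keys=True) instead of calling aggregate() directly. A minimal sketch of the intended wiring follows; it is illustrative only — the ScaleBy transformer below is hypothetical, and the real Transformer interface may expose different hooks than guessed here.

    from gs_quant.backtests.triggers import RiskTriggerRequirements, TriggerDirection
    from gs_quant.risk import DollarPrice
    from gs_quant.risk.transform import Transformer

    class ScaleBy(Transformer):  # hypothetical subclass, for illustration only
        def __init__(self, factor: float):
            self.factor = factor

    reqs = RiskTriggerRequirements(risk=DollarPrice,
                                   trigger_level=1_000_000,
                                   direction=TriggerDirection.ABOVE,
                                   risk_transformation=ScaleBy(0.5))
    # risk_transformation=None preserves the previous aggregate()-only behaviour.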
diff --git a/gs_quant/models/risk_model_utils.py b/gs_quant/models/risk_model_utils.py
index 45b8ef49..95ce969b 100644
--- a/gs_quant/models/risk_model_utils.py
+++ b/gs_quant/models/risk_model_utils.py
@@ -205,44 +205,28 @@ def batch_and_upload_partial_data(model_id: str, data: dict, max_asset_size: int
     date = data.get('date')
     _upload_factor_data_if_present(model_id, data, date)
     sleep(2)
-    _repeat_try_catch_request(_batch_data_v2, model_id=model_id, data=data, max_asset_size=max_asset_size, date=date)
-
-
-def _batch_data_v2(model_id: str, data: dict, max_asset_size: int, date: Union[str, dt.date]):
-    if data.get('assetData'):
-        asset_data_list, target_size = _batch_input_data({'assetData': data.get('assetData')}, max_asset_size)
-        for i in range(len(asset_data_list)):
-            final_upload = True if i == len(asset_data_list) - 1 else False
+    for risk_model_data_type in ["assetData", "issuerSpecificCovariance", "factorPortfolios"]:
+        _repeat_try_catch_request(_batch_data_v2, model_id=model_id, data=data.get(risk_model_data_type),
+                                  data_type=risk_model_data_type, max_asset_size=max_asset_size, date=date)
+        sleep(2)
+
+
+def _batch_data_v2(model_id: str, data: dict, data_type: str, max_asset_size: int, date: Union[str, dt.date]):
+    if data:
+        if data_type in ["issuerSpecificCovariance", "factorPortfolios"]:
+            max_asset_size //= 2
+        data_list, _ = _batch_input_data({data_type: data}, max_asset_size)
+        for i in range(len(data_list)):
+            final_upload = True if i == len(data_list) - 1 else False
             try:
                 res = GsFactorRiskModelApi.upload_risk_model_data(model_id=model_id,
-                                                                  model_data={'assetData': asset_data_list[i],
-                                                                              'date': date},
+                                                                  model_data={data_type: data_list[i], 'date': date},
                                                                   partial_upload=True, final_upload=final_upload)
                 logging.info(res)
             except (MqRequestError, Exception) as e:
                 raise e
-    if 'issuerSpecificCovariance' in data.keys() or 'factorPortfolios' in data.keys():
-        for optional_input_key in ['issuerSpecificCovariance', 'factorPortfolios']:
-            if data.get(optional_input_key):
-                optional_data = data.get(optional_input_key)
-                optional_data_list, target_size = _batch_input_data({optional_input_key: optional_data},
-                                                                    max_asset_size // 2)
-                logging.info(f'{optional_input_key} being uploaded for {date}...')
-                for i in range(len(optional_data_list)):
-                    final_upload = True if i == len(optional_data_list) - 1 else False
-                    try:
-                        res = GsFactorRiskModelApi.upload_risk_model_data(model_id=model_id,
-                                                                          model_data={
-                                                                              optional_input_key: optional_data_list[i],
-                                                                              'date': date},
-                                                                          partial_upload=True,
-                                                                          final_upload=final_upload)
-                        logging.info(res)
-                    except (MqRequestError, Exception) as e:
-                        raise e
-

 def batch_and_upload_coverage_data(date: dt.date, gsid_list: list, model_id: str):
     update_time = dt.datetime.today().strftime("%Y-%m-%dT%H:%M:%SZ")
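The refactored upload loop treats each data type uniformly: assetData is split into batches of max_asset_size assets, issuerSpecificCovariance and factorPortfolios use half that batch size, and each type gets its own partial-upload sequence. A self-contained sketch of the batch-count arithmetic (an illustrative helper, not part of the library):

    import math

    def num_uploads(n_assets: int, data_type: str, max_asset_size: int) -> int:
        """How many partial uploads one data type needs under the new batching rule."""
        if data_type in ("issuerSpecificCovariance", "factorPortfolios"):
            max_asset_size //= 2  # mirrors the halving in _batch_data_v2
        return math.ceil(n_assets / max_asset_size)

    assert num_uploads(3, "assetData", 2) == 2         # batches of 2 -> [2, 1]
    assert num_uploads(3, "factorPortfolios", 2) == 3  # batches of 1 -> [1, 1, 1]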
diff --git a/gs_quant/test/models/test_risk_model.py b/gs_quant/test/models/test_risk_model.py
index f4eb91ff..f824bbe5 100644
--- a/gs_quant/test/models/test_risk_model.py
+++ b/gs_quant/test/models/test_risk_model.py
@@ -18,12 +18,11 @@
 from gs_quant.models.risk_model import FactorRiskModel, MacroRiskModel, ReturnFormat, Unit
 from gs_quant.session import *
-from gs_quant.target.risk_models import RiskModel as Risk_Model, RiskModelCoverage, RiskModelTerm,\
+from gs_quant.target.risk_models import RiskModel as Risk_Model, RiskModelCoverage, RiskModelTerm, \
     RiskModelUniverseIdentifier, RiskModelType, RiskModelDataAssetsRequest as DataAssetsRequest, \
     RiskModelDataMeasure as Measure, RiskModelUniverseIdentifierRequest as UniverseIdentifier
 import datetime as dt

-
 empty_entitlements = {
     "execute": [],
     "edit": [],
@@ -677,5 +676,125 @@ def test_get_specific_return(mocker):
     assert response == specific_return_response


+def test_upload_risk_model_data(mocker):
+    model = mock_risk_model(mocker)
+    risk_model_data = {
+        'date': '2023-04-14',
+        'assetData': {
+            'universe': ['2407966', '2046251', 'USD'],
+            'specificRisk': [12.09, 45.12, 3.09],
+            'factorExposure': [
+                {'1': 0.23, '2': 0.023},
+                {'1': 0.23},
+                {'3': 0.23, '2': 0.023}
+            ],
+            'totalRisk': [0.12, 0.45, 1.2]
+        },
+        'factorData': [
+            {
+                'factorId': '1',
+                'factorName': 'USD',
+                'factorCategory': 'Currency',
+                'factorCategoryId': 'CUR'
+            },
+            {
+                'factorId': '2',
+                'factorName': 'ST',
+                'factorCategory': 'ST',
+                'factorCategoryId': 'ST'
+            },
+            {
+                'factorId': '3',
+                'factorName': 'IND',
+                'factorCategory': 'IND',
+                'factorCategoryId': 'IND'
+            }
+        ],
+        'covarianceMatrix': [[0.089, 0.0123, 0.345],
+                             [0.0123, 3.45, 0.345],
+                             [0.345, 0.345, 1.23]],
+        'issuerSpecificCovariance': {
+            'universeId1': ['2407966'],
+            'universeId2': ['2046251'],
+            'covariance': [0.03754]
+        },
+        'factorPortfolios': {
+            'universe': ['2407966', '2046251'],
+            'portfolio': [{'factorId': 1, 'weights': [0.25, 0.75]},
+                          {'factorId': 2, 'weights': [0.25, 0.75]},
+                          {'factorId': 3, 'weights': [0.25, 0.75]}]
+        }
+    }
+
+    base_url = f"/risk/models/data/{model.id}?partialUpload=true"
+    date = risk_model_data.get("date")
+    max_asset_batch_size = 2
+
+    batched_asset_data = [
+        {"assetData": {key: value[i:i + max_asset_batch_size] for key, value in
+                       risk_model_data.get("assetData").items()}, "date": date,
+         } for i in range(0, len(risk_model_data.get("assetData").get("universe")), max_asset_batch_size)
+    ]
+
+    max_asset_batch_size //= 2
+    batched_factor_portfolios = [
+        {"factorPortfolios":
+            {key: (value[i:i + max_asset_batch_size] if key in "universe" else
+                   [{"factorId": factor_weights.get("factorId"),
+                     "weights": factor_weights.get("weights")[i:i + max_asset_batch_size]}
+                    for factor_weights in value])
+             for key, value in risk_model_data.get("factorPortfolios").items()},
+         "date": date
+         } for i in range(0, len(risk_model_data.get("factorPortfolios").get("universe")), max_asset_batch_size)
+    ]
+
+    expected_factor_data_calls = [
+        mock.call(base_url, {"date": date, "factorData": risk_model_data.get("factorData"),
+                             "covarianceMatrix": risk_model_data.get("covarianceMatrix")}, timeout=200)
+    ]
+
+    expected_asset_data_calls = []
+    for batch_num, batch_asset_payload in enumerate(batched_asset_data):
+        final_upload_flag = 'true' if batch_num == len(batched_asset_data) - 1 else 'false'
+        expected_asset_data_calls.append(
+            mock.call(f"{base_url}&finalUpload={final_upload_flag}", batch_asset_payload, timeout=200)
+        )
+
+    expected_factor_portfolios_data_calls = []
+    for batch_num, batched_fp_payload in enumerate(batched_factor_portfolios):
+        final_upload_flag = 'true' if batch_num == len(batched_factor_portfolios) - 1 else 'false'
+        expected_factor_portfolios_data_calls.append(
+            mock.call(f"{base_url}&finalUpload={final_upload_flag}", batched_fp_payload, timeout=200)
+        )
+
+    expected_isc_data_calls = [
+        mock.call(f"{base_url}&finalUpload=true",
+                  {"issuerSpecificCovariance": risk_model_data.get("issuerSpecificCovariance"), "date": date},
+                  timeout=200)
+    ]
+
+    expected_calls = expected_factor_data_calls + expected_asset_data_calls + \
+        expected_isc_data_calls + expected_factor_portfolios_data_calls
+
+    # mock GsSession
+    mocker.patch.object(
+        GsSession.__class__,
+        'default_value',
+        return_value=GsSession.get(
+            Environment.QA,
+            'client_id',
+            'secret'))
+    mocker.patch.object(GsSession.current, '_post', return_value='Upload Successful')
+
+    max_asset_batch_size = 2
+    model.upload_data(risk_model_data, max_asset_batch_size=max_asset_batch_size)
+
+    call_args_list = GsSession.current._post.call_args_list
+
+    assert len(call_args_list) == len(expected_calls)
+    assert call_args_list == expected_calls
+
+    GsSession.current._post.assert_has_calls(expected_calls, any_order=False)
+
+
 if __name__ == "__main__":
     pytest.main([__file__])
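The test pins down both the payload batching and the finalUpload flag sequence: within each data type, every batch but the last is posted with finalUpload=false. The flag logic in isolation (an illustrative helper mirroring the test's comprehension style, not library code):

    def final_upload_flags(num_batches: int) -> list:
        """'false' for every partial batch, 'true' only for the last one."""
        return ['true' if i == num_batches - 1 else 'false' for i in range(num_batches)]

    assert final_upload_flags(1) == ['true']
    assert final_upload_flags(2) == ['false', 'true']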
diff --git a/gs_quant/test/timeseries/test_backtesting.py b/gs_quant/test/timeseries/test_backtesting.py
index b8718457..ee677f6d 100644
--- a/gs_quant/test/timeseries/test_backtesting.py
+++ b/gs_quant/test/timeseries/test_backtesting.py
@@ -70,6 +70,36 @@ def test_basket_series():
                      index=dates)
     assert_series_equal(mreb, basket_series([mreb], [1], rebal_freq=RebalFreq.MONTHLY))

+    dates = [
+        datetime.datetime(2019, 1, 1),
+        datetime.datetime(2019, 1, 2),
+        datetime.datetime(2019, 1, 3),
+        datetime.datetime(2019, 1, 4),
+        datetime.datetime(2019, 1, 5),
+        datetime.datetime(2019, 1, 8),
+        datetime.datetime(2019, 1, 9),
+        datetime.datetime(2019, 1, 10),
+        datetime.datetime(2019, 1, 11),
+        datetime.datetime(2019, 1, 12),
+        datetime.datetime(2019, 1, 13)
+    ]
+    wreb = pd.Series(
+        [100.0, 105, 110, 115, 120, 125,
+         130, 135, 140, 145, 150],
+        index=dates)
+
+    wreb_2 = pd.Series(
+        [100.0, 105, 110, 115, 120, 125,
+         130, 135, 140, 145, 150],
+        index=dates)
+
+    ret_wreb = pd.Series(
+        [100.0, 110.0, 120.0, 130.0, 140.0, 150.0,
+         162.0, 174.0, 186.0, 198.0, 210.0],
+        index=dates)
+
+    assert_series_equal(ret_wreb, basket_series([wreb, wreb_2], [1, 1], rebal_freq=RebalFreq.WEEKLY))
+

 def _mock_spot_data():
     dates = pd.date_range(start='2021-01-01', periods=6)
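Why ret_wreb steps by 10 before 2019-01-08 and by 12 after: basket_series holds units fixed between rebalances, so two unit-weighted copies of the same series add 2 x units x 5 per 5-point move, and the weekly rebalance on 2019-01-08 resets the units to 150 / 125 = 1.2 per underlyer. A quick check of that arithmetic, using the test's numbers (illustrative only):

    units_before = 100 * 1 / 100        # 1.0 unit per underlyer at inception
    assert 100 + 2 * units_before * 5 == 110.0

    units_after = 150 * 1 / 125         # 1.2 units after the 2019-01-08 rebalance
    assert 150 + 2 * units_after * 5 == 162.0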
diff --git a/gs_quant/test/timeseries/test_measures_fx_vol.py b/gs_quant/test/timeseries/test_measures_fx_vol.py
index b49a0262..44645a66 100644
--- a/gs_quant/test/timeseries/test_measures_fx_vol.py
+++ b/gs_quant/test/timeseries/test_measures_fx_vol.py
@@ -40,7 +40,13 @@
 def test_currencypair_to_tdapi_fxfwd_asset():
     mock_eur = Cross('MA8RY265Q34P7TWZ', 'EURUSD')
+    replace = Replacer()
+    xrefs = replace('gs_quant.timeseries.measures_fx_vol._get_tdapi_fxo_assets', Mock())
+    xrefs.return_value = 'MA8RY265Q34P7TWZ'
+    bbid_mock = replace('gs_quant.timeseries.measures_fx_vol.Asset.get_identifier', Mock())
+    bbid_mock.return_value = {'EURUSD'}
     assert _currencypair_to_tdapi_fxfwd_asset(mock_eur) == "MA8RY265Q34P7TWZ"
+    replace.restore()


 def test_currencypair_to_tdapi_fxo_asset(mocker):
@@ -153,7 +159,9 @@ def test_get_tdapi_fxo_assets():
     replace = Replacer()
     assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
     assets.return_value = [mock_asset_1]
-    assert 'MAW8SAXPSKYA94E2' == tm_fxo._get_tdapi_fxo_assets()
+    kwargs = dict(asset_parameters_expiration_date='5y', asset_parameters_call_currency='USD',
+                  asset_parameters_put_currency='EUR')
+    assert 'MAW8SAXPSKYA94E2' == tm_fxo._get_tdapi_fxo_assets(**kwargs)
     replace.restore()

     assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
@@ -199,7 +207,8 @@
 def mock_curr(_cls, _q):
     d = {
         'impliedVolatility': [1, 2, 3],
-        'fwdPoints': [4, 5, 6]
+        'fwdPoints': [4, 5, 6],
+        'forwardPoint': [7, 8, 9]
     }
     df = MarketDataResponseFrame(data=d, index=_index * 3)
     df.dataset_ids = _test_datasets
@@ -295,8 +304,16 @@ def test_fwd_points(mocker):
     args['settlement_date'] = '6m'

     args['real_time'] = True
-    with pytest.raises(NotImplementedError):
-        tm_fxo.fwd_points(**args)
+    xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
+    xrefs.return_value = 'EURUSD'
+    identifiers = replace('gs_quant.timeseries.measures_fx_vol._get_tdapi_fxo_assets', Mock())
+    identifiers.return_value = {'MAGZMXVM0J282ZTR'}
+    mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
+    actual = tm_fxo.fwd_points(**args)
+    expected = tm.ExtendedSeries([7, 8, 9], index=_index * 3, name='forwardPoint')
+    expected.dataset_ids = _test_datasets
+    assert_series_equal(expected, actual)
+    assert actual.dataset_ids == _test_datasets
     args['real_time'] = False
     args['asset'] = Cross('MAGZMXVM0J282ZTR', 'EURUSD')

diff --git a/gs_quant/test/utils/mock_request.py b/gs_quant/test/utils/mock_request.py
index 7785254c..d1f37d60 100644
--- a/gs_quant/test/utils/mock_request.py
+++ b/gs_quant/test/utils/mock_request.py
@@ -20,6 +20,7 @@
 from typing import List
 from unittest import mock

+from gs_quant.errors import MqUninitialisedError
 from gs_quant.session import GsSession, Environment


@@ -45,9 +46,12 @@ def __enter__(self):
             self.mocker.patch.object(self.api, self.method, side_effect=self.mock_calc_create_new_files if str(
                 self.save_files).casefold() == 'new' else self.mock_calc_create_files)
         else:
-            from gs_quant.session import OAuth2Session
-            OAuth2Session.init = mock.MagicMock(return_value=None)
-            GsSession.use(Environment.PROD, 'fake_client_id', 'fake_secret', application=self.application)
+            try:
+                _ = GsSession.current
+            except MqUninitialisedError:
+                from gs_quant.session import OAuth2Session
+                OAuth2Session._authenticate = mock.MagicMock(return_value=None)
+                GsSession.use(Environment.PROD, 'fake_client_id', 'fake_secret', application=self.application)
         self.mocker.patch.object(self.api, self.method, side_effect=self.mock_calc)

     def mock_calc(self, *args, **kwargs):
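The mock_request change makes the fake session opt-in: an already-initialised GsSession is left untouched, and the mocked OAuth session is installed only when no session exists. The guard in isolation (a sketch assuming, as the diff does, that GsSession.current raises MqUninitialisedError when no session has been set):

    from unittest import mock

    from gs_quant.errors import MqUninitialisedError
    from gs_quant.session import Environment, GsSession

    try:
        _ = GsSession.current                  # reuse whatever session is active
    except MqUninitialisedError:
        from gs_quant.session import OAuth2Session
        OAuth2Session._authenticate = mock.MagicMock(return_value=None)  # skip real auth
        GsSession.use(Environment.PROD, 'fake_client_id', 'fake_secret')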
diff --git a/gs_quant/timeseries/backtesting.py b/gs_quant/timeseries/backtesting.py
index caf689e9..bc952405 100644
--- a/gs_quant/timeseries/backtesting.py
+++ b/gs_quant/timeseries/backtesting.py
@@ -30,7 +30,7 @@
 _logger = logging.getLogger(__name__)

-RebalFreq = _create_enum('RebalFreq', ['Daily', 'Monthly'])
+RebalFreq = _create_enum('RebalFreq', ['Daily', 'Weekly', 'Monthly'])
 ReturnType = _create_enum('ReturnType', ['excess_return'])


@@ -70,12 +70,17 @@ def backtest_basket(
     if rebal_freq == RebalFreq.DAILY:
         rebal_dates = cal
     else:
-        # Get hypothetical monthly rebalances
-        num_rebals = (cal[-1].year - cal[0].year) * 12 + cal[-1].month - cal[0].month
-        rebal_dates = [cal[0] + i * rdelta(months=1) for i in range(num_rebals + 1)]
-        # Convert these to actual calendar days
-        rebal_dates = [d for d in rebal_dates if d < max(cal)]
-        rebal_dates = [min(cal[cal >= date]) for date in rebal_dates]
+        if rebal_freq == RebalFreq.WEEKLY:
+            # Get hypothetical weekly rebalances
+            num_rebals = ((cal[-1] - cal[0]).days) // 7
+            rebal_dates = [cal[0] + i * rdelta(weeks=1) for i in range(num_rebals + 1)]
+        else:
+            # Get hypothetical monthly rebalances
+            num_rebals = (cal[-1].year - cal[0].year) * 12 + cal[-1].month - cal[0].month
+            rebal_dates = [cal[0] + i * rdelta(months=1) for i in range(num_rebals + 1)]
+
+        # Convert the hypothetical weekly/monthly rebalance dates to actual calendar days
+        rebal_dates = [min(cal[cal >= date]) for date in rebal_dates if date < max(cal)]

     # Create Units dataframe
     units = pd.DataFrame(index=cal, columns=series.columns)
@@ -140,7 +145,7 @@ def basket_series(
     :param series: list of time series of instrument prices
     :param weights: list of weights (defaults to evenly weight series)
     :param costs: list of execution costs in decimal (defaults to costs of 0)
-    :param rebal_freq: rebalancing frequency - Daily or Monthly (defaults to Daily)
+    :param rebal_freq: rebalancing frequency - Daily, Weekly or Monthly (defaults to Daily)
     :param return_type: return type of underlying instruments - only excess return is supported

     :return: time series of the resulting basket
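With the enum extended, weekly rebalancing is requested exactly like daily or monthly: hypothetical dates are generated seven days apart from the first calendar date and snapped to the next actual trading day. A minimal usage sketch in the spirit of the new unit test (plain pandas series, so no session is needed; the dates differ slightly from the test's since bdate_range skips weekends):

    import pandas as pd

    from gs_quant.timeseries.backtesting import RebalFreq, basket_series

    dates = pd.bdate_range(start='2019-01-01', periods=11).to_pydatetime().tolist()
    prices = pd.Series([100.0 + 5 * i for i in range(11)], index=dates)

    # two unit-weighted copies of the same underlyer, rebalanced weekly
    basket = basket_series([prices, prices.copy()], [1, 1], rebal_freq=RebalFreq.WEEKLY)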
diff --git a/gs_quant/timeseries/measures.py b/gs_quant/timeseries/measures.py
index 4b8a450c..a1966a9d 100644
--- a/gs_quant/timeseries/measures.py
+++ b/gs_quant/timeseries/measures.py
@@ -2240,7 +2240,9 @@ def carry_term(asset: Asset, pricing_date: Optional[GENERIC_DATE] = None,
     with DataContext(start, end):
         q = GsDataApi.build_market_data_query([asset_id], QueryType.FORWARD_POINT, where={}, source=source,
                                               real_time=real_time)
-        q_spot = GsDataApi.build_market_data_query([asset_id], QueryType.SPOT, where={}, source=source,
+        # setting pricing location to NYC as we have forward points only for the NYC close
+        q_spot = GsDataApi.build_market_data_query([asset_id], QueryType.SPOT, where={'pricingLocation': 'NYC'},
+                                                   source=source,
                                                    real_time=real_time)
     data_requests = [partial(_market_data_timed, q, request_id),
                      partial(_market_data_timed, q_spot, request_id)]
@@ -2252,15 +2254,19 @@ def carry_term(asset: Asset, pricing_date: Optional[GENERIC_DATE] = None,
     latest = df.index.max()
     _logger.info('selected pricing date %s', latest)
     df = df.loc[latest]
-    df.loc[:, 'expirationDate'] = df.index + df['tenor'].map(_to_offset) + cbd - cbd
+    df = df.assign(expirationDate=df.index + df['tenor'].map(_to_offset) + cbd - cbd)
+    df = df.set_index('expirationDate')
     df.sort_index(inplace=True)
     df = df.loc[DataContext.current.start_date: DataContext.current.end_date]
     spot = spot_df.loc[latest]['spot']
-    df[forward_col_name] = df[forward_col_name] / spot
+
     if annualized == FXSpotCarry.ANNUALIZED:
-        df[forward_col_name] = df[forward_col_name] * np.sqrt((df.index.to_series() - latest).dt.days / 252)
-    series = ExtendedSeries(dtype=float) if df.empty else ExtendedSeries(df[forward_col_name])
+        df['carry'] = df[forward_col_name] * np.sqrt((df.index.to_series() - latest).dt.days / 252) / spot
+    else:
+        df['carry'] = df[forward_col_name] / spot
+
+    series = ExtendedSeries(dtype=float) if df.empty else ExtendedSeries(df['carry'])
     series.name = 'carry'
     series.dataset_ids = tuple(dataset_ids)
     return series
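The annualised carry above works out to forward points divided by spot, scaled by the square root of the business-day year fraction to expiry. A worked number with illustrative inputs (0.004 forward points, 1.10 spot, 126 business days):

    import numpy as np

    forward_point, spot, days_to_expiry = 0.004, 1.10, 126
    carry = forward_point * np.sqrt(days_to_expiry / 252) / spot
    assert round(carry, 5) == 0.00257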
diff --git a/gs_quant/timeseries/measures_fx_vol.py b/gs_quant/timeseries/measures_fx_vol.py
index 2322a8e4..5aa7cfd3 100644
--- a/gs_quant/timeseries/measures_fx_vol.py
+++ b/gs_quant/timeseries/measures_fx_vol.py
@@ -289,8 +289,17 @@ def get_defaults_for_cross(self, cross: str):
         }


-def _currencypair_to_tdapi_fxfwd_asset(_asset_spec: ASSET_SPEC) -> str:
-    return "MA8RY265Q34P7TWZ"
+def _currencypair_to_tdapi_fxfwd_asset(asset_spec: ASSET_SPEC) -> str:
+    asset = _asset_from_spec(asset_spec)
+    bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
+
+    kwargs = dict(asset_class='FX', type='Forward',
+                  asset_parameters_pair=bbid,
+                  asset_parameters_settlement_date='1y',
+                  )
+
+    mqid = _get_tdapi_fxo_assets(**kwargs)
+    return mqid


 def _currencypair_to_tdapi_fxo_asset(asset_spec: ASSET_SPEC) -> str:
@@ -323,6 +332,47 @@ def _get_tdapi_fxo_assets(**kwargs) -> Union[str, list]:
     return assets[0].id


+def get_fxo_asset(asset: Asset, expiry_tenor: str, strike: str, option_type: str = None,
+                  expiration_location: str = None, premium_payment_date: str = None) -> str:
+    cross = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
+
+    if cross not in FX_DEFAULTS.keys():
+        raise NotImplementedError('Data not available for {} FX Vanilla options'.format(cross))
+
+    defaults = _get_fxo_defaults(cross)
+
+    if not (tm_rates._is_valid_relative_date_tenor(expiry_tenor)):
+        raise MqValueError('invalid expiry ' + expiry_tenor)
+
+    if expiration_location is None:
+        _ = defaults["expirationTime"]
+    else:
+        _ = expiration_location
+
+    if premium_payment_date is None:
+        premium_date = defaults["premiumPaymentDate"]
+    else:
+        premium_date = premium_payment_date
+
+    if option_type == "Put":
+        call_ccy = defaults["over"]
+        put_ccy = defaults["under"]
+    else:
+        call_ccy = defaults["under"]
+        put_ccy = defaults["over"]
+
+    kwargs = dict(asset_class='FX', type='Option',
+                  asset_parameters_call_currency=call_ccy,
+                  asset_parameters_put_currency=put_ccy,
+                  asset_parameters_expiration_date=expiry_tenor,
+                  asset_parameters_option_type=option_type,
+                  asset_parameters_premium_payment_date=premium_date,
+                  asset_parameters_strike_price_relative=strike,
+                  )
+
+    return _get_tdapi_fxo_assets(**kwargs)
+
+
 def _get_tdapi_fxo_assets_vol_swaps(**kwargs) -> Union[str, list]:
     # sanitize input for asset query.
@@ -376,7 +426,7 @@ def _get_fx_vol_swap_data(asset: Asset, expiry_tenor: str, strike_type: str = No
                           query_type: QueryType = QueryType.STRIKE_VOL) \
         -> pd.DataFrame:
     if real_time:
-        raise NotImplementedError('realtime inflation swap data not implemented')
+        raise NotImplementedError('realtime FX Vol swap data not implemented')

     cross = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)

@@ -410,7 +460,13 @@ def _get_fxfwd_data(asset: Asset, settlement_date: str,
                     query_type: QueryType = QueryType.FWD_POINTS) \
         -> pd.DataFrame:
     if real_time:
-        raise NotImplementedError('realtime inflation swap data not implemented')
+        mqid = asset.get_identifier(AssetIdentifier.MARQUEE_ID)
+        q = GsDataApi.build_market_data_query([mqid], QueryType.FORWARD_POINT, source=source,
+                                              real_time=real_time, where={'tenor': settlement_date})
+        _logger.debug('q %s', q)
+        df = _market_data_timed(q)
+        return df
+
     cross = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)

     if not (tm_rates._is_valid_relative_date_tenor(settlement_date)):
@@ -421,7 +477,7 @@ def _get_fxfwd_data(asset: Asset, settlement_date: str,
                   asset_parameters_settlement_date=settlement_date,
                   )

-    rate_mqid = _get_tdapi_fxo_assets(**kwargs)
+    mqid = _get_tdapi_fxo_assets(**kwargs)

     if location is None:
         pricing_location = PricingLocation.NYC
@@ -430,7 +486,7 @@ def _get_fxfwd_data(asset: Asset, settlement_date: str,

     where = dict(pricingLocation=pricing_location.value)

-    q = GsDataApi.build_market_data_query([rate_mqid], query_type, where=where, source=source,
+    q = GsDataApi.build_market_data_query([mqid], query_type, where=where, source=source,
                                           real_time=real_time)
     _logger.debug('q %s', q)
     df = _market_data_timed(q)
@@ -444,44 +500,10 @@ def _get_fxo_data(asset: Asset, expiry_tenor: str, strike: str, option_type: str
                   query_type: QueryType = QueryType.IMPLIED_VOLATILITY) \
         -> pd.DataFrame:
     if real_time:
-        raise NotImplementedError('realtime inflation swap data not implemented')
-    cross = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
-
-    if cross not in FX_DEFAULTS.keys():
-        raise NotImplementedError('Data not available for {} FX Vanilla options'.format(cross))
-
-    defaults = _get_fxo_defaults(cross)
-
-    if not (tm_rates._is_valid_relative_date_tenor(expiry_tenor)):
-        raise MqValueError('invalid expiry ' + expiry_tenor)
-
-    if expiration_location is None:
-        _ = defaults["expirationTime"]
-    else:
-        _ = expiration_location
-
-    if premium_payment_date is None:
-        premium_date = defaults["premiumPaymentDate"]
-    else:
-        premium_date = premium_payment_date
-
-    if option_type == "Put":
-        call_ccy = defaults["over"]
-        put_ccy = defaults["under"]
-    else:
-        call_ccy = defaults["under"]
-        put_ccy = defaults["over"]
-
-    kwargs = dict(asset_class='FX', type='Option',
-                  asset_parameters_call_currency=call_ccy,
-                  asset_parameters_put_currency=put_ccy,
-                  asset_parameters_expiration_date=expiry_tenor,
-                  asset_parameters_option_type=option_type,
-                  asset_parameters_premium_payment_date=premium_date,
-                  asset_parameters_strike_price_relative=strike,
-                  )
-
-    asset_mqid = _get_tdapi_fxo_assets(**kwargs)
+        raise NotImplementedError('realtime FX Option data not implemented')
+
+    asset_mqid = get_fxo_asset(asset=asset, expiry_tenor=expiry_tenor, strike=strike, option_type=option_type,
+                               expiration_location=expiration_location, premium_payment_date=premium_payment_date)

     if location is None:
         pricing_location = PricingLocation.NYC
@@ -573,7 +595,7 @@ def legacy_implied_volatility(asset: Asset, tenor: str, strike_reference: VolRef
     """


-@plot_measure((AssetClass.FX,), None,
+@plot_measure((AssetClass.FX,), (AssetType.Cross,),
               [MeasureDependency(id_provider=cross_stored_direction_for_fx_vol,
                                  query_type=QueryType.IMPLIED_VOLATILITY)],
               display_name="implied_volatility")
@@ -607,6 +629,10 @@ def implied_volatility_fxvol(asset: Asset, tenor: str, strike_reference: VolRefe
         cross = _cross_stored_direction_helper(bbid)
         if cross != bbid:
             cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
+            if strike_reference.value == VolReference.DELTA_CALL.value:
+                strike_reference = VolReference.DELTA_PUT
+            elif strike_reference.value == VolReference.DELTA_PUT.value:
+                strike_reference = VolReference.DELTA_CALL
         else:
             cross_asset = asset
     else:
@@ -659,8 +685,10 @@ def fwd_points(asset: Asset, settlement_date: str,

     df = _get_fxfwd_data(asset=asset, settlement_date=settlement_date,
                          location=location, source=source, real_time=real_time, query_type=QueryType.FWD_POINTS)
-
-    series = ExtendedSeries(dtype=float) if df.empty else ExtendedSeries(df['fwdPoints'])
+    if real_time:
+        series = ExtendedSeries(dtype=float) if df.empty else ExtendedSeries(df['forwardPoint'])
+    else:
+        series = ExtendedSeries(dtype=float) if df.empty else ExtendedSeries(df['fwdPoints'])
     series.dataset_ids = getattr(df, 'dataset_ids', ())
     return series
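Taken together, fwd_points now serves both frequencies: end-of-day queries keep reading the fwdPoints field, while real_time=True queries FORWARD_POINT intraday data filtered by tenor and returns the forwardPoint field. A usage sketch (the Marquee ID is the one used in the tests and is illustrative; an initialised GsSession is assumed):

    from gs_quant.markets.securities import Cross
    from gs_quant.timeseries.measures_fx_vol import fwd_points

    cross = Cross('MAGZMXVM0J282ZTR', 'EURUSD')
    eod = fwd_points(cross, settlement_date='6m')                       # 'fwdPoints' column
    intraday = fwd_points(cross, settlement_date='6m', real_time=True)  # 'forwardPoint' column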