Jl/add tour data to output #43

Open: wants to merge 10 commits into main
4 changes: 2 additions & 2 deletions Dockerfile
@@ -6,14 +6,14 @@ ENV FULL_CONDA_PATH $CONDA_DIR/envs/$CONDA_ENV


 ENV ASIM_PATH /activitysim
-ENV ASIM_SUBDIR example
+ENV ASIM_SUBDIR examples
 ENV EXEC_NAME simulation.py

 RUN apt-get --allow-releaseinfo-change update \
     && apt-get install -y build-essential zip unzip
 RUN conda update conda --yes

-RUN git clone https://github.com/ual/activitysim.git
+RUN git clone -b zn/buffered-road-geoms https://github.com/LBNL-UCB-STI/activitysim.git

 RUN conda env create --quiet -p $FULL_CONDA_PATH --file activitysim/environment.yml
 RUN cd activitysim && $FULL_CONDA_PATH/bin/python setup.py install
30 changes: 21 additions & 9 deletions activitysim/abm/models/generate_beam_plans.py
@@ -7,7 +7,7 @@
 import logging
 import warnings

-from activitysim.core import pipeline
+from activitysim.core import pipeline, orca
 from activitysim.core import inject

 logger = logging.getLogger('activitysim')
@@ -141,23 +141,32 @@ def generate_beam_plans():
     tours = pipeline.get_table('tours')
     persons = pipeline.get_table('persons')
     households = pipeline.get_table('households')
-    land_use = pipeline.get_table('land_use').reset_index()
-
-    # re-create zones shapefile
-    land_use['geometry'] = land_use['geometry'].apply(wkt.loads)
-    zones = gpd.GeoDataFrame(land_use, geometry='geometry', crs='EPSG:4326')
-    zones.geometry = zones.geometry.buffer(0)
+    if orca.is_table('beam_geoms'):
+        beam_geoms = pipeline.get_table('beam_geoms')
+        beam_geoms['geometry'] = gpd.GeoSeries.from_wkt(beam_geoms['geometry'])
+        zones = gpd.GeoDataFrame(beam_geoms, geometry='geometry', crs='EPSG:4326')
+        zones.geometry = zones.geometry.buffer(0)
+    else:
+        land_use = pipeline.get_table('land_use').reset_index()
+
+        # re-create zones shapefile
+        land_use['geometry'] = land_use['geometry'].apply(wkt.loads)
+        zones = gpd.GeoDataFrame(land_use, geometry='geometry', crs='EPSG:4326')
+        zones.geometry = zones.geometry.buffer(0)

     # augment trips table with attrs we need to generate plans
     trips = get_trip_coords(trips, zones, persons)
     trips['departure_time'] = generate_departure_times(trips, tours)
     trips['number_of_participants'] = trips['tour_id'].map(
         tours['number_of_participants'])
+    trips['tour_mode'] = trips['tour_id'].map(
+        tours['tour_mode'])

     # trim trips table
     cols = [
-        'person_id', 'departure_time', 'purpose', 'origin',
-        'destination', 'number_of_participants', 'trip_mode', 'x', 'y']
+        'person_id', 'tour_id', 'departure_time', 'purpose', 'origin',
+        'destination', 'number_of_participants', 'tour_mode', 'trip_mode', 'x', 'y']
     sorted_trips = trips[cols].sort_values(
         ['person_id', 'departure_time']).reset_index()

@@ -208,10 +217,13 @@

     final_plans['trip_id'] = final_plans['trip_id'].shift()
     final_plans['trip_mode'] = final_plans['trip_mode'].shift()
+    final_plans['tour_id'] = final_plans['tour_id'].shift()
+    final_plans['tour_mode'] = final_plans['tour_mode'].shift()
     final_plans['number_of_participants'] = final_plans[
         'number_of_participants'].shift()

     final_plans = final_plans[[
-        'trip_id', 'person_id', 'number_of_participants', 'trip_mode',
+        'tour_id', 'trip_id', 'person_id', 'number_of_participants', 'tour_mode', 'trip_mode',
         'PlanElementIndex', 'ActivityElement', 'ActivityType', 'x', 'y',
         'departure_time']]

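Since pipeline tables are stored as plain DataFrames, zone polygons travel through the pipeline as WKT strings and get re-hydrated here; buffer(0) is the usual trick for repairing invalid (e.g. self-intersecting) polygons before spatial operations. A minimal sketch of the same round-trip, using an illustrative two-zone table (every column name except 'geometry' is hypothetical):

    import geopandas as gpd
    import pandas as pd

    # Zone table with geometries serialized as WKT strings, mirroring how
    # beam_geoms/land_use come out of the pipeline store.
    zones_df = pd.DataFrame({
        'zone_id': [1, 2],
        'geometry': [
            'POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))',
            'POLYGON ((1 0, 2 0, 2 1, 1 1, 1 0))',
        ],
    })

    # Vectorized WKT parsing; equivalent to the slower
    # land_use['geometry'].apply(wkt.loads) used in the else branch.
    zones_df['geometry'] = gpd.GeoSeries.from_wkt(zones_df['geometry'])
    zones = gpd.GeoDataFrame(zones_df, geometry='geometry', crs='EPSG:4326')

    # buffer(0) repairs invalid polygons (slivers, self-intersections)
    zones.geometry = zones.geometry.buffer(0)

The shift() calls in the second hunk offset trip attributes by one plan-element row; the new tour_id and tour_mode columns follow the same alignment as the existing trip_id and trip_mode.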
6 changes: 6 additions & 0 deletions activitysim/abm/models/initialize.py
@@ -86,6 +86,12 @@ def initialize_landuse():

     model_settings = config.read_model_settings('initialize_landuse.yaml', mandatory=True)

+    beam_geometries_path = config.setting('beam_geometries')
+    data_file_path = config.data_file_path(beam_geometries_path, mandatory=True)
+
+    beam_geom_dataframe = pd.read_csv(data_file_path)
+    pipeline.rewrap("beam_geoms", beam_geom_dataframe)
+
     annotate_tables(model_settings, trace_label)

     # create accessibility (only required if multiprocessing wants to slice accessibility)
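initialize_landuse now loads the geometries CSV named by the beam_geometries setting and registers it in the pipeline as beam_geoms (this assumes pandas is already imported as pd in this module). generate_beam_plans only requires a 'geometry' column of WKT strings, so a file shaped like the following sketch would satisfy it (the id column is hypothetical):

    import pandas as pd

    # Hypothetical contents of clipped_tazs.csv: one WKT polygon per zone,
    # clipped to valid (road-buffered) geometry.
    beam_geoms = pd.DataFrame({
        'TAZ': [1, 2],
        'geometry': [
            'POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))',
            'POLYGON ((1 0, 2 0, 2 1, 1 1, 1 0))',
        ],
    })
    beam_geoms.to_csv('clipped_tazs.csv', index=False)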
@@ -31,29 +31,12 @@ local,_DF_IS_TOUR,'tour_type' in df.columns
 ,origin_walk_time,shortWalk*60/walkSpeed
 ,destination_walk_time,shortWalk*60/walkSpeed
 # RIDEHAIL,,
-,origin_density_measure,"(reindex(land_use.TOTPOP, df[orig_col_name]) + reindex(land_use.TOTEMP, df[orig_col_name])) / (reindex(land_use.TOTACRE, df[orig_col_name]) / 640)"
-,dest_density_measure,"(reindex(land_use.TOTPOP, df[dest_col_name]) + reindex(land_use.TOTEMP, df[dest_col_name])) / (reindex(land_use.TOTACRE, df[dest_col_name]) / 640)"
-,origin_density,"pd.cut(origin_density_measure, bins=[-np.inf, 500, 2000, 5000, 15000, np.inf], labels=[5, 4, 3, 2, 1]).astype(int)"
-,dest_density,"pd.cut(dest_density_measure, bins=[-np.inf, 500, 2000, 5000, 15000, np.inf], labels=[5, 4, 3, 2, 1]).astype(int)"
-,origin_zone_taxi_wait_time_mean,"origin_density.map({k: v for k, v in Taxi_waitTime_mean.items()})"
-,origin_zone_taxi_wait_time_sd,"origin_density.map({k: v for k, v in Taxi_waitTime_sd.items()})"
-,dest_zone_taxi_wait_time_mean,"dest_density.map({k: v for k, v in Taxi_waitTime_mean.items()})"
-,dest_zone_taxi_wait_time_sd,"dest_density.map({k: v for k, v in Taxi_waitTime_sd.items()})"
-# ,, Note that the mean and standard deviation are not the values for the distribution itself but of the underlying normal distribution it is derived from
-,origTaxiWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_taxi_wait_time_mean, sigma=origin_zone_taxi_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,destTaxiWaitTime,"rng.lognormal_for_df(df, mu=dest_zone_taxi_wait_time_mean, sigma=dest_zone_taxi_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_singleTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_single_waitTime_mean.items()})"
-,origin_zone_singleTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_single_waitTime_sd.items()})"
-,dest_zone_singleTNC_wait_time_mean,"dest_density.map({k: v for k, v in TNC_single_waitTime_mean.items()})"
-,dest_zone_singleTNC_wait_time_sd,"dest_density.map({k: v for k, v in TNC_single_waitTime_sd.items()})"
-,origSingleTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_singleTNC_wait_time_mean, sigma=origin_zone_singleTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,destSingleTNCWaitTime,"rng.lognormal_for_df(df, mu=dest_zone_singleTNC_wait_time_mean, sigma=dest_zone_singleTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_sharedTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_shared_waitTime_mean.items()})"
-,origin_zone_sharedTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_shared_waitTime_sd.items()})"
-,dest_zone_sharedTNC_wait_time_mean,"dest_density.map({k: v for k, v in TNC_shared_waitTime_mean.items()})"
-,dest_zone_sharedTNC_wait_time_sd,"dest_density.map({k: v for k, v in TNC_shared_waitTime_sd.items()})"
-,origSharedTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_sharedTNC_wait_time_mean, sigma=origin_zone_sharedTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,destSharedTNCWaitTime,"rng.lognormal_for_df(df, mu=dest_zone_sharedTNC_wait_time_mean, sigma=dest_zone_sharedTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
+,origTaxiWaitTime,"odt_skims['RH_SOLO_WAIT']"
+,destTaxiWaitTime,"dot_skims['RH_SOLO_WAIT']"
+,origSingleTNCWaitTime,"odt_skims['RH_SOLO_WAIT']"
+,destSingleTNCWaitTime,"dot_skims['RH_SOLO_WAIT']"
+,origSharedTNCWaitTime,"odt_skims['RH_POOLED_WAIT']"
+,destSharedTNCWaitTime,"dot_skims['RH_POOLED_WAIT']"
 ,totalWaitTaxi,origTaxiWaitTime + destTaxiWaitTime
 ,totalWaitSingleTNC,origSingleTNCWaitTime + destSingleTNCWaitTime
 ,totalWaitSharedTNC,origSharedTNCWaitTime + destSharedTNCWaitTime
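The expressions above now read ridehail waits straight from the RH_SOLO_WAIT / RH_POOLED_WAIT skims instead of simulating them from zonal density bins. A quick sketch for inspecting those matrices in the skims.omx file referenced by settings.yaml; the __AM time-period suffix follows ActivitySim's usual skim keying and is an assumption about how these skims are named:

    import numpy as np
    import openmatrix as omx

    # List the available matrices, then pull the AM solo/pooled ridehail waits.
    skims = omx.open_file('skims.omx', 'r')
    print(skims.list_matrices())

    solo_wait = np.asarray(skims['RH_SOLO_WAIT__AM'])      # zone-to-zone wait (minutes)
    pooled_wait = np.asarray(skims['RH_POOLED_WAIT__AM'])
    print('mean AM solo ridehail wait:', solo_wait.mean())
    skims.close()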
@@ -45,18 +45,9 @@ dest terminal time not counted at home,_dest_terminal_time,"np.where(inbound & l
 ,origin_walk_time,shortWalk*60/walkSpeed
 ,destination_walk_time,shortWalk*60/walkSpeed
 # RIDEHAIL,,
-,origin_density_measure,"(reindex(land_use.TOTPOP, df[orig_col_name]) + reindex(land_use.TOTEMP, df[orig_col_name])) / (reindex(land_use.TOTACRE, df[orig_col_name]) / 640)"
-,origin_density,"pd.cut(origin_density_measure, bins=[-np.inf, 500, 2000, 5000, 15000, np.inf], labels=[5, 4, 3, 2, 1]).astype(int)"
-,origin_zone_taxi_wait_time_mean,"origin_density.map({k: v for k, v in Taxi_waitTime_mean.items()})"
-,origin_zone_taxi_wait_time_sd,"origin_density.map({k: v for k, v in Taxi_waitTime_sd.items()})"
-# ,, Note that the mean and standard deviation are not the values for the distribution itself but of the underlying normal distribution it is derived from
-,origTaxiWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_taxi_wait_time_mean, sigma=origin_zone_taxi_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_singleTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_single_waitTime_mean.items()})"
-,origin_zone_singleTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_single_waitTime_sd.items()})"
-,origSingleTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_singleTNC_wait_time_mean, sigma=origin_zone_singleTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_sharedTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_shared_waitTime_mean.items()})"
-,origin_zone_sharedTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_shared_waitTime_sd.items()})"
-,origSharedTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_sharedTNC_wait_time_mean, sigma=origin_zone_sharedTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
+,origTaxiWaitTime,"odt_skims['RH_SOLO_WAIT']"
+,origSingleTNCWaitTime,"odt_skims['RH_SOLO_WAIT']"
+,origSharedTNCWaitTime,"odt_skims['RH_POOLED_WAIT']"
 #,,
 ,sov_available,odt_skims['SOV_TIME']>0
 ,hov2_available,odt_skims['HOV2_TIME']>0
1 change: 1 addition & 0 deletions bay_area/configs/configs/tour_mode_choice.yaml
@@ -127,6 +127,7 @@ CONSTANTS:
     5: 0
   min_waitTime: 0
   max_waitTime: 50
+  TNC_missed_trip_penalty: 50

   ivt_cost_multiplier: 0.6
   ivt_lrt_multiplier: 0.9
@@ -31,29 +31,13 @@ local,_DF_IS_TOUR,'tour_type' in df.columns
 ,origin_walk_time,shortWalk*60/walkSpeed
 ,destination_walk_time,shortWalk*60/walkSpeed
 # RIDEHAIL,,
-,origin_density_measure,"(reindex(land_use.TOTPOP, df[orig_col_name]) + reindex(land_use.TOTEMP, df[orig_col_name])) / (reindex(land_use.TOTACRE, df[orig_col_name]) / 640)"
-,dest_density_measure,"(reindex(land_use.TOTPOP, df[dest_col_name]) + reindex(land_use.TOTEMP, df[dest_col_name])) / (reindex(land_use.TOTACRE, df[dest_col_name]) / 640)"
-,origin_density,"pd.cut(origin_density_measure, bins=[-np.inf, 500, 2000, 5000, 15000, np.inf], labels=[5, 4, 3, 2, 1]).astype(int)"
-,dest_density,"pd.cut(dest_density_measure, bins=[-np.inf, 500, 2000, 5000, 15000, np.inf], labels=[5, 4, 3, 2, 1]).astype(int)"
-,origin_zone_taxi_wait_time_mean,"origin_density.map({k: v for k, v in Taxi_waitTime_mean.items()})"
-,origin_zone_taxi_wait_time_sd,"origin_density.map({k: v for k, v in Taxi_waitTime_sd.items()})"
-,dest_zone_taxi_wait_time_mean,"dest_density.map({k: v for k, v in Taxi_waitTime_mean.items()})"
-,dest_zone_taxi_wait_time_sd,"dest_density.map({k: v for k, v in Taxi_waitTime_sd.items()})"
-# ,, Note that the mean and standard deviation are not the values for the distribution itself, but of the underlying normal distribution it is derived from
-,origTaxiWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_taxi_wait_time_mean, sigma=origin_zone_taxi_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,destTaxiWaitTime,"rng.lognormal_for_df(df, mu=dest_zone_taxi_wait_time_mean, sigma=dest_zone_taxi_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_singleTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_single_waitTime_mean.items()})"
-,origin_zone_singleTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_single_waitTime_sd.items()})"
-,dest_zone_singleTNC_wait_time_mean,"dest_density.map({k: v for k, v in TNC_single_waitTime_mean.items()})"
-,dest_zone_singleTNC_wait_time_sd,"dest_density.map({k: v for k, v in TNC_single_waitTime_sd.items()})"
-,origSingleTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_singleTNC_wait_time_mean, sigma=origin_zone_singleTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,destSingleTNCWaitTime,"rng.lognormal_for_df(df, mu=dest_zone_singleTNC_wait_time_mean, sigma=dest_zone_singleTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_sharedTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_shared_waitTime_mean.items()})"
-,origin_zone_sharedTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_shared_waitTime_sd.items()})"
-,dest_zone_sharedTNC_wait_time_mean,"dest_density.map({k: v for k, v in TNC_shared_waitTime_mean.items()})"
-,dest_zone_sharedTNC_wait_time_sd,"dest_density.map({k: v for k, v in TNC_shared_waitTime_sd.items()})"
-,origSharedTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_sharedTNC_wait_time_mean, sigma=origin_zone_sharedTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,destSharedTNCWaitTime,"rng.lognormal_for_df(df, mu=dest_zone_sharedTNC_wait_time_mean, sigma=dest_zone_sharedTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
+,origTaxiWaitTime,"odt_skims['RH_SOLO_WAIT'] * (1.0 - odt_skims['RH_SOLO_REJECTIONPROB']) + TNC_missed_trip_penalty * odt_skims['RH_SOLO_REJECTIONPROB']"
+,destTaxiWaitTime,"dot_skims['RH_SOLO_WAIT'] * (1.0 - dot_skims['RH_SOLO_REJECTIONPROB']) + TNC_missed_trip_penalty * dot_skims['RH_SOLO_REJECTIONPROB']"
+,origSingleTNCWaitTime,"odt_skims['RH_SOLO_WAIT'] * (1.0 - odt_skims['RH_SOLO_REJECTIONPROB']) + TNC_missed_trip_penalty * odt_skims['RH_SOLO_REJECTIONPROB']"
+,destSingleTNCWaitTime,"dot_skims['RH_SOLO_WAIT'] * (1.0 - dot_skims['RH_SOLO_REJECTIONPROB']) + TNC_missed_trip_penalty * dot_skims['RH_SOLO_REJECTIONPROB']"
+,origSharedTNCWaitTime,"odt_skims['RH_POOLED_WAIT'] * (1.0 - odt_skims['RH_POOLED_REJECTIONPROB']) + TNC_missed_trip_penalty * odt_skims['RH_POOLED_REJECTIONPROB']"
+,destSharedTNCWaitTime,"dot_skims['RH_POOLED_WAIT'] * (1.0 - dot_skims['RH_POOLED_REJECTIONPROB']) + TNC_missed_trip_penalty * dot_skims['RH_POOLED_REJECTIONPROB']"
 ,totalWaitTaxi,origTaxiWaitTime + destTaxiWaitTime
 ,totalWaitSingleTNC,origSingleTNCWaitTime + destSingleTNCWaitTime
 ,totalWaitSharedTNC,origSharedTNCWaitTime + destSharedTNCWaitTime
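The bay_area variant goes further and blends the skimmed wait with the matching rejection-probability skim, costing a rejected request at the new TNC_missed_trip_penalty constant (50 minutes in the YAML above). A sketch of the expected-wait calculation these expressions encode:

    def expected_ridehail_wait(skim_wait, rejection_prob, missed_trip_penalty=50.0):
        """Expected ridehail wait, in minutes.

        With probability (1 - p) the request is served at the skimmed wait;
        with probability p it is rejected and the traveler incurs the fixed
        TNC_missed_trip_penalty instead.
        """
        return skim_wait * (1.0 - rejection_prob) + missed_trip_penalty * rejection_prob

    # e.g. a 5-minute skimmed wait with a 10% rejection probability is
    # valued at 0.9 * 5 + 0.1 * 50 = 9.5 minutes
    assert abs(expected_ridehail_wait(5.0, 0.1) - 9.5) < 1e-9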
1 change: 1 addition & 0 deletions bay_area/configs/configs/trip_mode_choice.yaml
@@ -157,6 +157,7 @@ CONSTANTS:
     5: 0
   min_waitTime: 0
   max_waitTime: 50
+  TNC_missed_trip_penalty: 50

 # so far, we can use the same spec as for non-joint tours
 preprocessor:
@@ -44,19 +44,10 @@ dest terminal time not counted at home,_dest_terminal_time,"np.where(inbound & l
 ,drive_transit_available,"np.where(df.outbound, _walk_transit_destination, _walk_transit_origin) & (df.auto_ownership > 0)"
 ,origin_walk_time,shortWalk*60/walkSpeed
 ,destination_walk_time,shortWalk*60/walkSpeed
-# RIDEHAIL,,
-,origin_density_measure,"(reindex(land_use.TOTPOP, df[orig_col_name]) + reindex(land_use.TOTEMP, df[orig_col_name])) / (reindex(land_use.TOTACRE, df[orig_col_name]) / 640)"
-,origin_density,"pd.cut(origin_density_measure, bins=[-np.inf, 500, 2000, 5000, 15000, np.inf], labels=[5, 4, 3, 2, 1]).astype(int)"
-,origin_zone_taxi_wait_time_mean,"origin_density.map({k: v for k, v in Taxi_waitTime_mean.items()})"
-,origin_zone_taxi_wait_time_sd,"origin_density.map({k: v for k, v in Taxi_waitTime_sd.items()})"
-# ,, Note that the mean and standard deviation are not the values for the distribution itself, but of the underlying normal distribution it is derived from
-,origTaxiWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_taxi_wait_time_mean, sigma=origin_zone_taxi_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_singleTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_single_waitTime_mean.items()})"
-,origin_zone_singleTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_single_waitTime_sd.items()})"
-,origSingleTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_singleTNC_wait_time_mean, sigma=origin_zone_singleTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
-,origin_zone_sharedTNC_wait_time_mean,"origin_density.map({k: v for k, v in TNC_shared_waitTime_mean.items()})"
-,origin_zone_sharedTNC_wait_time_sd,"origin_density.map({k: v for k, v in TNC_shared_waitTime_sd.items()})"
-,origSharedTNCWaitTime,"rng.lognormal_for_df(df, mu=origin_zone_sharedTNC_wait_time_mean, sigma=origin_zone_sharedTNC_wait_time_sd, broadcast=True, scale=True).clip(min_waitTime, max_waitTime)"
+# RIDEHAIL,, Could just define this in trip_mode_choice.csv but leaving this for now so that we can factor in rejected trips if needed
+,origTaxiWaitTime,"odt_skims['RH_SOLO_WAIT'] * (1.0 - odt_skims['RH_SOLO_REJECTIONPROB']) + TNC_missed_trip_penalty * odt_skims['RH_SOLO_REJECTIONPROB']"
+,origSingleTNCWaitTime,"odt_skims['RH_SOLO_WAIT'] * (1.0 - odt_skims['RH_SOLO_REJECTIONPROB']) + TNC_missed_trip_penalty * odt_skims['RH_SOLO_REJECTIONPROB']"
+,origSharedTNCWaitTime,"odt_skims['RH_POOLED_WAIT'] * (1.0 - odt_skims['RH_POOLED_REJECTIONPROB']) + TNC_missed_trip_penalty * odt_skims['RH_POOLED_REJECTIONPROB']"
 #,,
 ,sov_available,odt_skims['SOV_TIME']>0
 ,hov2_available,odt_skims['HOV2_TIME']>0
3 changes: 3 additions & 0 deletions bay_area/configs/settings.yaml
@@ -30,6 +30,9 @@ input_table_list:
 # input skims
 skims_file: skims.omx

+# input valid geometries
+beam_geometries: clipped_tazs.csv
+
 # raise error if any sub-process fails without waiting for others to complete
 fail_fast: True
