Skip to content

Commit

Permalink
Merge pull request #708 from opentargets/dev
Browse files Browse the repository at this point in the history
chore: trigger release process
  • Loading branch information
project-defiant authored Jul 22, 2024
2 parents 2ea1814 + ce8f46b commit 2a7544e
Show file tree
Hide file tree
Showing 5 changed files with 68 additions and 59 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/release.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,14 +34,15 @@ jobs:
fetch-depth: 0
repository: opentargets/gentropy
token: ${{ secrets.GITHUB_TOKEN }}
persist-credentials: false

- name: Python Semantic Release
id: semrelease
# v9.6.0 is required because newer versions of the semantic-release action use Python v3.12,
# which breaks the poetry build command.
uses: python-semantic-release/[email protected]
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
github_token: ${{ steps.trigger-token.outputs.token }}

- name: Publish package to GitHub Release
uses: python-semantic-release/upload-to-gh-release@main
Expand All @@ -51,7 +52,7 @@ jobs:
# requires using GH_APP to authenticate; otherwise a push authorised with
# the GITHUB_TOKEN does not trigger the tag artifact workflow.
# see https://github.com/actions/create-github-app-token
github_token: ${{ steps.trigger-token.outputs.token }}
github_token: ${{ secrets.GITHUB_TOKEN }}
tag: ${{ steps.semrelease.outputs.tag }}

- name: Store the distribution packages
Expand Down
94 changes: 48 additions & 46 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ google-cloud-secret-manager = "^2.20.0"

[tool.poetry.dev-dependencies]
pre-commit = "^3.7.0"
mypy = "^1.10"
mypy = "^1.11"
pep8-naming = "^0.14.1"
interrogate = "^1.7.0"
isort = "^5.13.2"
Expand Down Expand Up @@ -75,7 +75,7 @@ apache-airflow = "^2.8.0"
apache-airflow-providers-google = "^10.13.1"
pydoclint = ">=0.3.8,<0.6.0"
prettier = "^0.0.7"
deptry = ">=0.12,<0.17"
deptry = ">=0.12,<0.18"
yamllint = "^1.33.0"

[tool.semantic_release]
Expand Down
4 changes: 2 additions & 2 deletions src/gentropy/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -427,9 +427,9 @@ class FinemapperConfig(StepConfig):
"start_hail": True,
}
)
study_locus_to_finemap: str = MISSING
study_index_path: str = MISSING
output_path: str = MISSING
study_locus_manifest_path: str = MISSING
study_locus_index: int = MISSING
max_causal_snps: int = MISSING
primary_signal_pval_threshold: float = MISSING
secondary_signal_pval_threshold: float = MISSING
Expand Down
20 changes: 13 additions & 7 deletions src/gentropy/susie_finemapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,9 @@ class SusieFineMapperStep:
def __init__(
self,
session: Session,
study_locus_to_finemap: str,
study_index_path: str,
output_path: str,
study_locus_manifest_path: str,
study_locus_index: int,
max_causal_snps: int = 10,
primary_signal_pval_threshold: float = 1,
secondary_signal_pval_threshold: float = 1,
Expand All @@ -65,9 +65,9 @@ def __init__(
Args:
session (Session): Spark session
study_locus_to_finemap (str): path to the study locus to fine-map
study_index_path (str): path to the study index
output_path (str): path to the output
study_locus_manifest_path (str): Path to the CSV manifest containing all study locus input and output locations. Should contain two columns: study_locus_input and study_locus_output
study_locus_index (int): Index (0-based) of the locus in the manifest to process in this call
max_causal_snps (int): Maximum number of causal variants in locus, default is 10
primary_signal_pval_threshold (float): p-value threshold for the lead variant from the primary signal (credibleSetIndex==1), default is 1
secondary_signal_pval_threshold (float): p-value threshold for the lead variant from the secondary signals, default is 1
Expand All @@ -82,9 +82,15 @@ def __init__(
imputed_r2_threshold (float): imputed R2 threshold, default is 0.9
ld_score_threshold (float): LD score threshold for imputation, default is 5
"""
# Read locus manifest.
study_locus_manifest = pd.read_csv(study_locus_manifest_path)
row = study_locus_manifest.loc[study_locus_index]
study_locus_input = row["study_locus_input"]
study_locus_output = row["study_locus_output"]

# Read studyLocus
study_locus = (
StudyLocus.from_parquet(session, study_locus_to_finemap)
StudyLocus.from_parquet(session, study_locus_input)
.df.withColumn(
"studyLocusId", StudyLocus.assign_study_locus_id("studyId", "variantId")
)
Expand Down Expand Up @@ -115,11 +121,11 @@ def __init__(
if result_logging is not None:
# Write result
result_logging["study_locus"].df.write.mode(session.write_mode).parquet(
output_path
study_locus_output
)
# Write log
result_logging["log"].to_parquet(
output_path + ".log",
study_locus_output + ".log",
engine="pyarrow",
index=False,
)
Expand Down

0 comments on commit 2a7544e

Please sign in to comment.