Replace base SHA with original pull request base SHA instead of updated one so diff is more restrictive #929

Workflow file for this run

# This workflow runs the pipeline with the minimal test dataset to check that it completes without any syntax errors
name: nf-core CI
on:
  pull_request:
  release:
    types: [published]
  merge_group:
    types:
      - checks_requested
    branches:
      - master
      - dev

env:
  NXF_ANSI_LOG: false
  NFT_VER: "0.8.4"
  NFT_WORKDIR: "~"
  NFT_DIFF: "pdiff"
  NFT_DIFF_ARGS: "--line-numbers --expand-tabs=2"
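# The NFT_* variables above pin the nf-test version and point NFT_DIFF at pdiff (installed in
# the test job below) as the external tool used to display nf-test snapshot differences.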

concurrency:
  group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}"
  cancel-in-progress: true
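# Only one run per pull request (or ref) is kept alive; a newer push cancels any in-progress run.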

jobs:
  changes:
    name: Check for changes
    runs-on: ubuntu-latest
    outputs:
      changes: ${{ steps.changed_files.outputs.any_modified }}
      tags: ${{ steps.list.outputs.tags }}
    steps:
      - uses: actions/setup-python@v4
        with:
          python-version: "3.11"
          architecture: "x64"
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
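      # Full history (fetch-depth: 0) is required so git merge-base in the next step can find
      # the common ancestor of the PR branch and the base branch.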
      - name: get-common-commit-hash
        id: hash
        run: |
          git fetch origin ${{ github.base_ref }}
          echo commit=$(git merge-base --fork-point origin/${{ github.ref }} ${{ github.base_ref }}) >> $GITHUB_OUTPUT
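      # The merge-base computed above is what gets passed to tj-actions/changed-files as
      # base_sha below. Per this run's title, a more restrictive diff would instead use the
      # pull request's original base SHA (presumably github.event.pull_request.base.sha);
      # that expression is an assumption drawn from the title, not part of this file.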
      - uses: tj-actions/changed-files@v42
        id: changed_files
        with:
          base_sha: ${{ steps.hash.outputs.commit }}
          dir_names: "true"
          output_renamed_files_as_deleted_and_added: "true"
          # Define list of additional rules for testing paths
          # Mostly, we define additional 'pipeline' or 'all' tests here
          files_yaml: |
            ".":
              - .github/workflows/**
              - nf-test.config
              - nextflow.config
            tests:
              - assets/*
              - bin/*
              - conf/*
              - main.nf
              - nextflow_schema.json
          files_ignore: |
            .git*
            .gitpod.yml
            .prettierignore
            .prettierrc.yml
            **.md
            **.png
            modules.json
            pyproject.toml
            tower.yml
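      # Changes under the "." key (workflow and top-level config files) presumably trigger the
      # full pipeline-level test set, while the "tests" key adds pipeline-wide paths to the
      # changed set handed to the tag-listing step below; documentation-style files are ignored.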
      - name: debug
        run: |
          echo ${{ steps.changed_files.outputs.any_modified }}
          echo ${{ steps.changed_files.outputs.all_changed_files }}
          echo ${{ steps.changed_files.outputs.changed_keys }}
      - name: nf-test list tags
        id: list
        if: ${{ steps.changed_files.outputs.any_modified }}
        run: |
          echo tags=$(python \
            .github/python/find_changed_files.py \
            -t pipeline workflow process \
            -p ${{ steps.changed_files.outputs.all_changed_files }} ${{ steps.changed_files.outputs.changed_keys }} \
          ) >> $GITHUB_OUTPUT
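      # find_changed_files.py is expected to map the changed paths onto a JSON list of nf-test
      # tags (at pipeline, workflow, and process level); that list drives the test matrix below.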
      - name: debug2
        run: |
          echo ${{ steps.list.outputs.tags }}

  test:
    name: ${{ matrix.tags }} ${{ matrix.profile }} NF-${{ matrix.NXF_VER }}
    needs: [changes]
    if: needs.changes.outputs.changes
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        NXF_VER:
          - "latest-everything"
          - "23.04"
        tags: ["${{ fromJson(needs.changes.outputs.tags) }}"]
        profile:
          - "docker"
    steps:
      - name: Check out pipeline code
        uses: actions/checkout@v4
      - name: Install Nextflow
        uses: nf-core/setup-nextflow@v1
        with:
          version: "${{ matrix.NXF_VER }}"
      - uses: actions/setup-python@v4
        with:
          python-version: "3.11"
          architecture: "x64"
      - name: Install pdiff to see diff between nf-test snapshots
        run: |
          python -m pip install --upgrade pip
          pip install pdiff
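      # pdiff is the external diff tool referenced by NFT_DIFF/NFT_DIFF_ARGS in the env block above.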
      - name: Cache nf-test installation
        id: cache-software
        uses: actions/cache@v3
        with:
          path: |
            /usr/local/bin/nf-test
            /home/runner/.nf-test/nf-test.jar
          key: ${{ runner.os }}-${{ env.NFT_VER }}-nftest
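      # The cache key embeds NFT_VER, so bumping the nf-test version invalidates the cache and
      # forces a fresh install in the next step.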
      - name: Install nf-test
        if: steps.cache-software.outputs.cache-hit != 'true'
        run: |
          wget -qO- https://code.askimed.com/install/nf-test | bash
          sudo mv nf-test /usr/local/bin/
      - name: Run nf-test
        run: |
          nf-test test --verbose --tag ${{ matrix.tags }} --profile "+${{ matrix.profile }}" --junitxml=test.xml --tap=test.tap
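      # The leading "+" adds the docker profile on top of any profile the test itself defines
      # rather than replacing it; test.xml and test.tap feed the two reporting steps below.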
      - uses: pcolby/tap-summary@v1
        with:
          path: >-
            test.tap
      - name: Output log on failure
        if: failure()
        run: |
          sudo apt install bat > /dev/null
          batcat --decorations=always --color=always ${{ github.workspace }}/.nf-test/tests/*/meta/nextflow.log
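      # On Ubuntu the "bat" package installs its binary as "batcat", hence the command name above.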
      - name: Publish Test Report
        uses: mikepenz/action-junit-report@v3
        if: always() # always run even if the previous step fails
        with:
          report_paths: test.xml

  confirm-pass:
    runs-on: ubuntu-latest
    needs: [test]
    if: always()
    steps:
      - name: All tests ok
        if: ${{ success() || !contains(needs.*.result, 'failure') }}
        run: exit 0
      - name: One or more tests failed
        if: ${{ contains(needs.*.result, 'failure') }}
        run: exit 1
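      # confirm-pass runs even when the test matrix is skipped, presumably so branch protection
      # can require a single check; it fails only if at least one needed job actually failed.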
      - name: debug-print
        if: always()
        run: |
          echo "toJSON(needs) = ${{ toJSON(needs) }}"
          echo "toJSON(needs.*.result) = ${{ toJSON(needs.*.result) }}"