diff --git a/.github/markdownlint.yml b/.github/markdownlint.yml index a0eac5a096..793fead434 100644 --- a/.github/markdownlint.yml +++ b/.github/markdownlint.yml @@ -3,6 +3,13 @@ default: true line-length: false no-duplicate-header: siblings_only: true +no-inline-html: + allowed_elements: + - img + - p + - kbd + - details + - summary # tools only - the {{ jinja variables }} break URLs and cause this to error no-bare-urls: false # tools only - suppresses error messages for usage of $ in main README diff --git a/.gitignore b/.gitignore index 84ddfd3a08..77ce81a93b 100644 --- a/.gitignore +++ b/.gitignore @@ -112,3 +112,5 @@ ENV/ # Jetbrains IDEs .idea +pip-wheel-metadata +.vscode diff --git a/CHANGELOG.md b/CHANGELOG.md index e949ddce9d..db034a4e99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # nf-core/tools: Changelog +## [v1.12.1 - Silver Dolphin](https://github.com/nf-core/tools/releases/tag/1.12.1) - [2020-12-03] + +### Template + +* Finished switch from `$baseDir` to `$projectDir` in `iGenomes.conf` and `main.nf` + * Main fix is for `smail_fields` which was a bug introduced in the previous release. Sorry about that! +* Ported a number of small content tweaks from nf-core/eager to the template [[#786](https://github.com/nf-core/tools/issues/786)] + * Better contributing documentation, more placeholders in documentation files, more relaxed markdownlint exceptions for certain HTML tags, more content for the PR and issue templates. + +### Tools helper code + +* Pipeline schema: make parameters of type `range` to `number`. 
[[#738](https://github.com/nf-core/tools/issues/738)] +* Respect `$NXF_HOME` when looking for pipelines with `nf-core list` [[#798](https://github.com/nf-core/tools/issues/798)] +* Swapped PyInquirer with questionary for command line questions in `launch.py` [[#726](https://github.com/nf-core/tools/issues/726)] + * This should fix conda installation issues that some people had been hitting + * The change also allows other improvements to the UI +* Fix linting crash when a file deleted but not yet staged in git [[#796](https://github.com/nf-core/tools/issues/796)] + ## [v1.12 - Mercury Weasel](https://github.com/nf-core/tools/releases/tag/1.12) - [2020-11-19] ### Tools helper code diff --git a/nf_core/launch.py b/nf_core/launch.py index 8d64a612b3..97231f8827 100644 --- a/nf_core/launch.py +++ b/nf_core/launch.py @@ -10,7 +10,8 @@ import json import logging import os -import PyInquirer +import prompt_toolkit +import questionary import re import subprocess import textwrap @@ -20,15 +21,21 @@ log = logging.getLogger(__name__) -# -# NOTE: When PyInquirer 1.0.3 is released we can capture keyboard interruptions -# in a nicer way # with the raise_keyboard_interrupt=True argument in the PyInquirer.prompt() calls -# It also allows list selections to have a default set. 
-# -# Until then we have workarounds: -# * Default list item is moved to the top of the list -# * We manually raise a KeyboardInterrupt if we get None back from a question -# +# Custom style for questionary +nfcore_question_style = prompt_toolkit.styles.Style( + [ + ("qmark", "fg:ansiblue bold"), # token in front of the question + ("question", "bold"), # question text + ("answer", "fg:ansigreen nobold"), # submitted answer text behind the question + ("pointer", "fg:ansiyellow bold"), # pointer used in select and checkbox prompts + ("highlighted", "fg:ansiblue bold"), # pointed-at choice in select and checkbox prompts + ("selected", "fg:ansigreen noreverse"), # style for a selected item of a checkbox + ("separator", "fg:ansiblack"), # separator in lists + ("instruction", ""), # user instructions for select, rawselect, checkbox + ("text", ""), # plain text + ("disabled", "fg:gray italic"), # disabled choices for select and checkbox prompts + ] +) class Launch(object): @@ -256,11 +263,9 @@ def prompt_web_gui(self): "name": "use_web_gui", "message": "Choose launch method", "choices": ["Web based", "Command line"], + "default": "Web based", } - answer = PyInquirer.prompt([question]) - # TODO: use raise_keyboard_interrupt=True when PyInquirer 1.0.3 is released - if answer == {}: - raise KeyboardInterrupt + answer = questionary.unsafe_prompt([question], style=nfcore_question_style) return answer["use_web_gui"] == "Web based" def launch_web_gui(self): @@ -347,14 +352,14 @@ def sanitise_web_response(self): The web builder returns everything as strings. Use the functions defined in the cli wizard to convert to the correct types. 
""" - # Collect pyinquirer objects for each defined input_param - pyinquirer_objects = {} + # Collect questionary objects for each defined input_param + questionary_objects = {} for param_id, param_obj in self.schema_obj.schema.get("properties", {}).items(): - pyinquirer_objects[param_id] = self.single_param_to_pyinquirer(param_id, param_obj, print_help=False) + questionary_objects[param_id] = self.single_param_to_questionary(param_id, param_obj, print_help=False) for d_key, definition in self.schema_obj.schema.get("definitions", {}).items(): for param_id, param_obj in definition.get("properties", {}).items(): - pyinquirer_objects[param_id] = self.single_param_to_pyinquirer(param_id, param_obj, print_help=False) + questionary_objects[param_id] = self.single_param_to_questionary(param_id, param_obj, print_help=False) # Go through input params and sanitise for params in [self.nxf_flags, self.schema_obj.input_params]: @@ -364,7 +369,7 @@ def sanitise_web_response(self): del params[param_id] continue # Run filter function on value - filter_func = pyinquirer_objects.get(param_id, {}).get("filter") + filter_func = questionary_objects.get(param_id, {}).get("filter") if filter_func is not None: params[param_id] = filter_func(params[param_id]) @@ -396,19 +401,13 @@ def prompt_param(self, param_id, param_obj, is_required, answers): """Prompt for a single parameter""" # Print the question - question = self.single_param_to_pyinquirer(param_id, param_obj, answers) - answer = PyInquirer.prompt([question]) - # TODO: use raise_keyboard_interrupt=True when PyInquirer 1.0.3 is released - if answer == {}: - raise KeyboardInterrupt + question = self.single_param_to_questionary(param_id, param_obj, answers) + answer = questionary.unsafe_prompt([question], style=nfcore_question_style) # If required and got an empty reponse, ask again while type(answer[param_id]) is str and answer[param_id].strip() == "" and is_required: log.error("'–-{}' is required".format(param_id)) - answer = 
PyInquirer.prompt([question]) - # TODO: use raise_keyboard_interrupt=True when PyInquirer 1.0.3 is released - if answer == {}: - raise KeyboardInterrupt + answer = questionary.unsafe_prompt([question], style=nfcore_question_style) # Don't return empty answers if answer[param_id] == "": @@ -426,29 +425,31 @@ def prompt_group(self, group_id, group_obj): Returns: Dict of param_id:val answers """ - question = { - "type": "list", - "name": group_id, - "message": group_obj.get("title", group_id), - "choices": ["Continue >>", PyInquirer.Separator()], - } - - for param_id, param in group_obj["properties"].items(): - if not param.get("hidden", False) or self.show_hidden: - question["choices"].append(param_id) - - # Skip if all questions hidden - if len(question["choices"]) == 2: - return {} - while_break = False answers = {} while not while_break: + question = { + "type": "list", + "name": group_id, + "message": group_obj.get("title", group_id), + "choices": ["Continue >>", questionary.Separator()], + } + + for param_id, param in group_obj["properties"].items(): + if not param.get("hidden", False) or self.show_hidden: + q_title = param_id + if param_id in answers: + q_title += " [{}]".format(answers[param_id]) + elif "default" in param: + q_title += " [{}]".format(param["default"]) + question["choices"].append(questionary.Choice(title=q_title, value=param_id)) + + # Skip if all questions hidden + if len(question["choices"]) == 2: + return {} + self.print_param_header(group_id, group_obj) - answer = PyInquirer.prompt([question]) - # TODO: use raise_keyboard_interrupt=True when PyInquirer 1.0.3 is released - if answer == {}: - raise KeyboardInterrupt + answer = questionary.unsafe_prompt([question], style=nfcore_question_style) if answer[group_id] == "Continue >>": while_break = True # Check if there are any required parameters that don't have answers @@ -456,7 +457,7 @@ def prompt_group(self, group_id, group_obj): req_default = self.schema_obj.input_params.get(p_required, "") 
req_answer = answers.get(p_required, "") if req_default == "" and req_answer == "": - log.error("'{}' is required.".format(p_required)) + log.error("'--{}' is required.".format(p_required)) while_break = False else: param_id = answer[group_id] @@ -465,8 +466,8 @@ def prompt_group(self, group_id, group_obj): return answers - def single_param_to_pyinquirer(self, param_id, param_obj, answers=None, print_help=True): - """Convert a JSONSchema param to a PyInquirer question + def single_param_to_questionary(self, param_id, param_obj, answers=None, print_help=True): + """Convert a JSONSchema param to a Questionary question Args: param_id: Parameter ID (string) @@ -475,7 +476,7 @@ def single_param_to_pyinquirer(self, param_id, param_obj, answers=None, print_he print_help: If description and help_text should be printed (bool) Returns: - Single PyInquirer dict, to be appended to questions list + Single Questionary dict, to be appended to questions list """ if answers is None: answers = {} @@ -530,7 +531,11 @@ def validate_number(val): try: if val.strip() == "": return True - float(val) + fval = float(val) + if "minimum" in param_obj and fval < float(param_obj["minimum"]): + return "Must be greater than or equal to {}".format(param_obj["minimum"]) + if "maximum" in param_obj and fval > float(param_obj["maximum"]): + return "Must be less than or equal to {}".format(param_obj["maximum"]) except ValueError: return "Must be a number" else: @@ -568,46 +573,11 @@ def filter_integer(val): question["filter"] = filter_integer - if param_obj.get("type") == "range": - # Validate range type - def validate_range(val): - try: - if val.strip() == "": - return True - fval = float(val) - if "minimum" in param_obj and fval < float(param_obj["minimum"]): - return "Must be greater than or equal to {}".format(param_obj["minimum"]) - if "maximum" in param_obj and fval > float(param_obj["maximum"]): - return "Must be less than or equal to {}".format(param_obj["maximum"]) - return True - except 
ValueError: - return "Must be a number" - - question["validate"] = validate_range - - # Filter returned value - def filter_range(val): - if val.strip() == "": - return "" - return float(val) - - question["filter"] = filter_range - if "enum" in param_obj: # Use a selection list instead of free text input question["type"] = "list" question["choices"] = param_obj["enum"] - # Validate enum from schema - def validate_enum(val): - if val == "": - return True - if val in param_obj["enum"]: - return True - return "Must be one of: {}".format(", ".join(param_obj["enum"])) - - question["validate"] = validate_enum - # Validate pattern from schema if "pattern" in param_obj: @@ -620,21 +590,6 @@ def validate_pattern(val): question["validate"] = validate_pattern - # WORKAROUND - PyInquirer <1.0.3 cannot have a default position in a list - # For now, move the default option to the top. - # TODO: Delete this code when PyInquirer >=1.0.3 is released. - if question["type"] == "list" and "default" in question: - try: - question["choices"].remove(question["default"]) - question["choices"].insert(0, question["default"]) - except ValueError: - log.warning( - "Default value `{}` not found in list of choices: {}".format( - question["default"], ", ".join(question["choices"]) - ) - ) - ### End of workaround code - return question def print_param_header(self, param_id, param_obj): diff --git a/nf_core/lint.py b/nf_core/lint.py index 74c5b170a4..e057bc38b6 100755 --- a/nf_core/lint.py +++ b/nf_core/lint.py @@ -239,7 +239,7 @@ def lint_pipeline(self, release_mode=False): log.debug("Running lint test: {}".format(fun_name)) getattr(self, fun_name)() if len(self.failed) > 0: - log.error("Found test failures in `{}`, halting lint run.".format(fun_name)) + log.critical("Found test failures in `{}`, halting lint run.".format(fun_name)) break def check_files_exist(self): @@ -1241,17 +1241,25 @@ def check_cookiecutter_strings(self): num_files = 0 for fn in list_of_files: num_files += 1 - with 
io.open(fn, "r", encoding="latin1") as fh: - lnum = 0 - for l in fh: - lnum += 1 - cc_matches = re.findall(r"{{\s*cookiecutter[^}]*}}", l) - if len(cc_matches) > 0: - for cc_match in cc_matches: - self.failed.append( - (13, "Found a cookiecutter template string in `{}` L{}: {}".format(fn, lnum, cc_match)) - ) - num_matches += 1 + try: + with io.open(fn, "r", encoding="latin1") as fh: + lnum = 0 + for l in fh: + lnum += 1 + cc_matches = re.findall(r"{{\s*cookiecutter[^}]*}}", l) + if len(cc_matches) > 0: + for cc_match in cc_matches: + self.failed.append( + ( + 13, + "Found a cookiecutter template string in `{}` L{}: {}".format( + fn, lnum, cc_match + ), + ) + ) + num_matches += 1 + except FileNotFoundError as e: + log.warn("`git ls-files` returned '{}' but could not open it!".format(fn)) if num_matches == 0: self.passed.append((13, "Did not find any cookiecutter template strings ({} files)".format(num_files))) diff --git a/nf_core/list.py b/nf_core/list.py index c81d35bd42..45cbeb5d18 100644 --- a/nf_core/list.py +++ b/nf_core/list.py @@ -128,6 +128,8 @@ def get_local_nf_workflows(self): # Try to guess the local cache directory (much faster than calling nextflow) if len(os.environ.get("NXF_ASSETS", "")) > 0: nextflow_wfdir = os.environ.get("NXF_ASSETS") + elif len(os.environ.get("NXF_HOME", "")) > 0: + nextflow_wfdir = os.path.join(os.environ.get("NXF_HOME"), "assets") else: nextflow_wfdir = os.path.join(os.getenv("HOME"), ".nextflow", "assets") if os.path.isdir(nextflow_wfdir): @@ -348,6 +350,8 @@ def get_local_nf_workflow_details(self): # Try to guess the local cache directory if len(os.environ.get("NXF_ASSETS", "")) > 0: nf_wfdir = os.path.join(os.environ.get("NXF_ASSETS"), self.full_name) + elif len(os.environ.get("NXF_HOME", "")) > 0: + nf_wfdir = os.path.join(os.environ.get("NXF_HOME"), "assets") else: nf_wfdir = os.path.join(os.getenv("HOME"), ".nextflow", "assets", self.full_name) if os.path.isdir(nf_wfdir): diff --git 
a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/CONTRIBUTING.md b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/CONTRIBUTING.md index 8ab3b9bd2e..8bedc3996e 100644 --- a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/CONTRIBUTING.md +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/CONTRIBUTING.md @@ -18,8 +18,9 @@ If you'd like to write some code for {{ cookiecutter.name }}, the standard workf 1. Check that there isn't already an issue about your idea in the [{{ cookiecutter.name }} issues](https://github.com/{{ cookiecutter.name }}/issues) to avoid duplicating work * If there isn't one already, please create one so that others know you're working on this 2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [{{ cookiecutter.name }} repository](https://github.com/{{ cookiecutter.name }}) to your GitHub account -3. Make the necessary changes / additions within your forked repository -4. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged +3. Make the necessary changes / additions within your forked repository following [Pipeline conventions](#pipeline-contribution-conventions) +4. Use `nf-core schema build .` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10). +5. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged If you're not used to this workflow with git, you can start with some [docs from GitHub](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests) or even their [excellent `git` resources](https://try.github.io/). 
@@ -30,14 +31,14 @@ Typically, pull-requests are only fully reviewed when these tests are passing, t There are typically two types of tests that run: -### Lint Tests +### Lint tests `nf-core` has a [set of guidelines](https://nf-co.re/developers/guidelines) which all pipelines must adhere to. To enforce these and ensure that all pipelines stay in sync, we have developed a helper tool which runs checks on the pipeline code. This is in the [nf-core/tools repository](https://github.com/nf-core/tools) and once installed can be run locally with the `nf-core lint ` command. If any failures or warnings are encountered, please follow the listed URL for more documentation. -### Pipeline Tests +### Pipeline tests Each `nf-core` pipeline should be set up with a minimal set of test-data. `GitHub Actions` then runs the pipeline on this data to ensure that it exits successfully. @@ -55,3 +56,73 @@ These tests are run both with the latest available version of `Nextflow` and als ## Getting help For further information/help, please consult the [{{ cookiecutter.name }} documentation](https://nf-co.re/{{ cookiecutter.short_name }}/usage) and don't hesitate to get in touch on the nf-core Slack [#{{ cookiecutter.short_name }}](https://nfcore.slack.com/channels/{{ cookiecutter.short_name }}) channel ([join our Slack here](https://nf-co.re/join/slack)). + +## Pipeline contribution conventions + +To make the {{ cookiecutter.name }} code and processing logic more understandable for new contributors and to ensure quality, we semi-standardise the way the code and other contributions are written. + +### Adding a new step + +If you wish to contribute a new step, please use the following coding standards: + +1. Define the corresponding input channel into your new process from the expected previous process channel +2. Write the process block (see below). +3. Define the output channel if needed (see below). +4. Add any new flags/options to `nextflow.config` with a default (see below). +5. 
Add any new flags/options to `nextflow_schema.json` with help text (with `nf-core schema build .`) +6. Add any new flags/options to the help message (for integer/text parameters, print to help the corresponding `nextflow.config` parameter). +7. Add sanity checks for all relevant parameters. +8. Add any new software to the `scrape_software_versions.py` script in `bin/` and the version command to the `scrape_software_versions` process in `main.nf`. +9. Do local tests that the new code works properly and as expected. +10. Add a new test command in `.github/workflow/ci.yaml`. +11. If applicable add a [MultiQC](https://multiqc.info/) module. +12. Update MultiQC config `assets/multiqc_config.yaml` so relevant suffixes, name clean up, General Statistics Table column order, and module figures are in the right order. +13. Optional: Add any descriptions of MultiQC report sections and output files to `docs/output.md`. + +### Default values + +Parameters should be initialised / defined with default values in `nextflow.config` under the `params` scope. + +Once there, use `nf-core schema build .` to add to `nextflow_schema.json`. + +### Default processes resource requirements + +Sensible defaults for process resource requirements (CPUs / memory / time) for a process should be defined in `conf/base.config`. These should generally be specified generic with `withLabel:` selectors so they can be shared across multiple processes/steps of the pipeline. A nf-core standard set of labels that should be followed where possible can be seen in the [nf-core pipeline template](https://github.com/nf-core/tools/blob/master/nf_core/pipeline-template/%7B%7Bcookiecutter.name_noslash%7D%7D/conf/base.config), which has the default process as a single core-process, and then different levels of multi-core configurations for increasingly large memory requirements defined with standardised labels. 
+ +The process resources can be passed on to the tool dynamically within the process with the `${task.cpus}` and `${task.memory}` variables in the `script:` block. + +### Naming schemes + +Please use the following naming schemes, to make it easy to understand what is going where. + +* initial process channel: `ch_output_from_` +* intermediate and terminal channels: `ch__for_` + +### Nextflow version bumping + +If you are using a new feature from core Nextflow, you may bump the minimum required version of nextflow in the pipeline with: `nf-core bump-version --nextflow . [min-nf-version]` + +### Software version reporting + +If you add a new tool to the pipeline, please ensure you add the information of the tool to the `get_software_version` process. + +Add to the script block of the process, something like the following: + +```bash + --version &> v_.txt 2>&1 || true +``` + +or + +```bash + --help | head -n 1 &> v_.txt 2>&1 || true +``` + +You then need to edit the script `bin/scrape_software_versions.py` to: + +1. Add a Python regex for your tool's `--version` output (as in stored in the `v_.txt` file), to ensure the version is reported as a `v` and the version number e.g. `v2.1.1` +2. Add a HTML entry to the `OrderedDict` for formatting in MultiQC. + +### Images and figures + +For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines). diff --git a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/ISSUE_TEMPLATE/bug_report.md b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/ISSUE_TEMPLATE/bug_report.md index 6f384d628a..64aa8d22b0 100644 --- a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/ISSUE_TEMPLATE/bug_report.md +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/ISSUE_TEMPLATE/bug_report.md @@ -13,6 +13,13 @@ Thanks for telling us about a problem with the pipeline. 
Please delete this text and anything that's not relevant from the template below: --> +## Check Documentation + +I have checked the following places for your error: + +- [ ] [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting) +- [ ] [{{ cookiecutter.name }} pipeline documentation](https://nf-co.re/{{ cookiecutter.name }}/usage) + ## Description of the bug @@ -28,6 +35,13 @@ Steps to reproduce the behaviour: +## Log files + +Have you provided the following extra information/files: + +- [ ] The command used to run the pipeline +- [ ] The `.nextflow.log` file + ## System - Hardware: diff --git a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/PULL_REQUEST_TEMPLATE.md b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/PULL_REQUEST_TEMPLATE.md index 25f24d6c1f..8c4b16d0f7 100644 --- a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/PULL_REQUEST_TEMPLATE.md +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/PULL_REQUEST_TEMPLATE.md @@ -13,8 +13,14 @@ Learn more about contributing: [CONTRIBUTING.md](https://github.com/{{ cookiecut ## PR checklist -- [ ] This comment contains a description of changes (with reason) -- [ ] `CHANGELOG.md` is updated +- [ ] This comment contains a description of changes (with reason). - [ ] If you've fixed a bug or added code that should be tested, add tests! 
-- [ ] Documentation in `docs` is updated -- [ ] If necessary, also make a PR on the [{{ cookiecutter.name }} branch on the nf-core/test-datasets repo](https://github.com/nf-core/test-datasets/pull/new/{{ cookiecutter.name }}) + - [ ] If you've added a new tool - add to the software_versions process and a regex to `scrape_software_versions.py` + - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/{{ cookiecutter.name }}/tree/master/.github/CONTRIBUTING.md) + - [ ] If necessary, also make a PR on the {{ cookiecutter.name }} _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. +- [ ] Make sure your code lints (`nf-core lint .`). +- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`). +- [ ] Usage Documentation in `docs/usage.md` is updated. +- [ ] Output Documentation in `docs/output.md` is updated. +- [ ] `CHANGELOG.md` is updated. +- [ ] `README.md` is updated (including new tool citations and authors/contributors). 
diff --git a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/markdownlint.yml b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/markdownlint.yml index 96b12a7039..8d7eb53b07 100644 --- a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/markdownlint.yml +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/.github/markdownlint.yml @@ -1,5 +1,12 @@ # Markdownlint configuration file -default: true, +default: true line-length: false no-duplicate-header: siblings_only: true +no-inline-html: + allowed_elements: + - img + - p + - kbd + - details + - summary diff --git a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/README.md b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/README.md index 6b8859a531..8f3b5b3cb6 100644 --- a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/README.md +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/README.md @@ -12,6 +12,9 @@ ## Introduction + +**{{ cookiecutter.name }}** is a bioinformatics best-practise analysis pipeline for + The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. ## Quick Start @@ -38,6 +41,15 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool See [usage docs](https://nf-co.re/{{ cookiecutter.short_name }}/usage) for all of the available options when running the pipeline. +## Pipeline Summary + +By default, the pipeline currently performs the following: + + + +* Sequencing quality control (`FastQC`) +* Overall pipeline run summaries (`MultiQC`) + ## Documentation The {{ cookiecutter.name }} pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/{{ cookiecutter.short_name }}/usage) and [output](https://nf-co.re/{{ cookiecutter.short_name }}/output). 
@@ -48,13 +60,18 @@ The {{ cookiecutter.name }} pipeline comes with documentation about the pipeline {{ cookiecutter.name }} was originally written by {{ cookiecutter.author }}. +We thank the following people for their extensive assistance in the development +of this pipeline: + + + ## Contributions and Support If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). For further information or help, don't hesitate to get in touch on the [Slack `#{{ cookiecutter.short_name }}` channel](https://nfcore.slack.com/channels/{{ cookiecutter.short_name }}) (you can join with [this invite](https://nf-co.re/join/slack)). -## Citation +## Citations @@ -67,3 +84,7 @@ You can cite the `nf-core` publication as follows: > > _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). > ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) + +In addition, references of tools and data used in this pipeline are as follows: + + diff --git a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/conf/igenomes.config b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/conf/igenomes.config index caeafceb25..31b7ee6130 100644 --- a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/conf/igenomes.config +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/conf/igenomes.config @@ -21,7 +21,7 @@ params { readme = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/README.txt" mito_name = "MT" macs_gsize = "2.7e9" - blacklist = "${baseDir}/assets/blacklists/GRCh37-blacklist.bed" + blacklist = "${projectDir}/assets/blacklists/GRCh37-blacklist.bed" } 'GRCh38' { fasta = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa" @@ -33,7 +33,7 @@ params { bed12 = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.bed" mito_name = "chrM" macs_gsize = "2.7e9" - blacklist = 
"${baseDir}/assets/blacklists/hg38-blacklist.bed" + blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" } 'GRCm38' { fasta = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa" @@ -46,7 +46,7 @@ params { readme = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/README.txt" mito_name = "MT" macs_gsize = "1.87e9" - blacklist = "${baseDir}/assets/blacklists/GRCm38-blacklist.bed" + blacklist = "${projectDir}/assets/blacklists/GRCm38-blacklist.bed" } 'TAIR10' { fasta = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa" @@ -270,7 +270,7 @@ params { bed12 = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.bed" mito_name = "chrM" macs_gsize = "2.7e9" - blacklist = "${baseDir}/assets/blacklists/hg38-blacklist.bed" + blacklist = "${projectDir}/assets/blacklists/hg38-blacklist.bed" } 'hg19' { fasta = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa" @@ -283,7 +283,7 @@ params { readme = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/README.txt" mito_name = "chrM" macs_gsize = "2.7e9" - blacklist = "${baseDir}/assets/blacklists/hg19-blacklist.bed" + blacklist = "${projectDir}/assets/blacklists/hg19-blacklist.bed" } 'mm10' { fasta = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa" @@ -296,7 +296,7 @@ params { readme = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/README.txt" mito_name = "chrM" macs_gsize = "1.87e9" - blacklist = "${baseDir}/assets/blacklists/mm10-blacklist.bed" + blacklist = "${projectDir}/assets/blacklists/mm10-blacklist.bed" } 'bosTau8' { fasta = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa" diff --git a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/main.nf b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/main.nf index f5c767e31e..e8f861f054 100644 --- 
a/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/main.nf +++ b/nf_core/pipeline-template/{{cookiecutter.name_noslash}}/main.nf @@ -336,7 +336,7 @@ workflow.onComplete { def email_html = html_template.toString() // Render the sendmail template - def smail_fields = [ email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, baseDir: "$projectDir", mqcFile: mqc_report, mqcMaxSize: params.max_multiqc_email_size.toBytes() ] + def smail_fields = [ email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, projectDir: "$projectDir", mqcFile: mqc_report, mqcMaxSize: params.max_multiqc_email_size.toBytes() ] def sf = new File("$projectDir/assets/sendmail_template.txt") def sendmail_template = engine.createTemplate(sf).make(smail_fields) def sendmail_html = sendmail_template.toString() diff --git a/nf_core/schema.py b/nf_core/schema.py index 0df35e3ea3..1fea917283 100644 --- a/nf_core/schema.py +++ b/nf_core/schema.py @@ -424,7 +424,7 @@ def prompt_remove_schema_notfound_config(self, p_key): if self.no_prompts or self.schema_from_scratch: return True if Confirm.ask( - ":question: Unrecognised [white bold]'params.{}'[/] found in schema but not pipeline! [yellow]Remove it?".format( + ":question: Unrecognised [white bold]'params.{}'[/] found in the schema but not in the pipeline config! [yellow]Remove it?".format( p_key ) ): @@ -443,7 +443,7 @@ def add_schema_found_configs(self): self.no_prompts or self.schema_from_scratch or Confirm.ask( - ":sparkles: Found [white bold]'params.{}'[/] in pipeline but not in schema. [blue]Add to pipeline schema?".format( + ":sparkles: Found [white bold]'params.{}'[/] in the pipeline config, but not in the schema. 
[blue]Add to pipeline schema?".format( p_key ) ) diff --git a/nf_core/utils.py b/nf_core/utils.py index f09c4bd3cb..2e6388db31 100644 --- a/nf_core/utils.py +++ b/nf_core/utils.py @@ -68,9 +68,12 @@ def fetch_wf_config(wf_path): cache_basedir = None cache_path = None + # Nextflow home directory - use env var if set, or default to ~/.nextflow + nxf_home = os.environ.get("NXF_HOME", os.path.join(os.getenv("HOME"), ".nextflow")) + # Build a cache directory if we can - if os.path.isdir(os.path.join(os.getenv("HOME"), ".nextflow")): - cache_basedir = os.path.join(os.getenv("HOME"), ".nextflow", "nf-core") + if os.path.isdir(nxf_home): + cache_basedir = os.path.join(nxf_home, "nf-core") if not os.path.isdir(cache_basedir): os.mkdir(cache_basedir) diff --git a/setup.py b/setup.py index de3c457207..7cd0ebbe25 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ from setuptools import setup, find_packages import sys -version = "1.12" +version = "1.12.1" with open("README.md") as f: readme = f.read() @@ -36,7 +36,8 @@ "GitPython", "jinja2", "jsonschema", - "PyInquirer==1.0.2", + "questionary>=1.8.0", + "prompt_toolkit>=3.0.3", "pyyaml", "requests", "requests_cache", diff --git a/tests/test_launch.py b/tests/test_launch.py index 70acc982a4..ac3575b407 100644 --- a/tests/test_launch.py +++ b/tests/test_launch.py @@ -93,21 +93,21 @@ def test_nf_merge_schema(self): assert self.launcher.schema_obj.schema["allOf"][0] == {"$ref": "#/definitions/coreNextflow"} assert "-resume" in self.launcher.schema_obj.schema["definitions"]["coreNextflow"]["properties"] - def test_ob_to_pyinquirer_string(self): + def test_ob_to_questionary_string(self): """ Check converting a python dict to a pyenquirer format - simple strings """ sc_obj = { "type": "string", "default": "data/*{1,2}.fastq.gz", } - result = self.launcher.single_param_to_pyinquirer("input", sc_obj) + result = self.launcher.single_param_to_questionary("input", sc_obj) assert result == {"type": "input", "name": "input", "message": 
"input", "default": "data/*{1,2}.fastq.gz"} - @mock.patch("PyInquirer.prompt", side_effect=[{"use_web_gui": "Web based"}]) + @mock.patch("questionary.unsafe_prompt", side_effect=[{"use_web_gui": "Web based"}]) def test_prompt_web_gui_true(self, mock_prompt): """ Check the prompt to launch the web schema or use the cli """ assert self.launcher.prompt_web_gui() == True - @mock.patch("PyInquirer.prompt", side_effect=[{"use_web_gui": "Command line"}]) + @mock.patch("questionary.unsafe_prompt", side_effect=[{"use_web_gui": "Command line"}]) def test_prompt_web_gui_false(self, mock_prompt): """ Check the prompt to launch the web schema or use the cli """ assert self.launcher.prompt_web_gui() == False @@ -198,13 +198,13 @@ def test_sanitise_web_response(self): assert self.launcher.schema_obj.input_params["single_end"] == True assert self.launcher.schema_obj.input_params["max_cpus"] == 12 - def test_ob_to_pyinquirer_bool(self): + def test_ob_to_questionary_bool(self): """ Check converting a python dict to a pyenquirer format - booleans """ sc_obj = { "type": "boolean", "default": "True", } - result = self.launcher.single_param_to_pyinquirer("single_end", sc_obj) + result = self.launcher.single_param_to_questionary("single_end", sc_obj) assert result["type"] == "list" assert result["name"] == "single_end" assert result["message"] == "single_end" @@ -218,10 +218,10 @@ def test_ob_to_pyinquirer_bool(self): assert result["filter"]("false") == False assert result["filter"](False) == False - def test_ob_to_pyinquirer_number(self): + def test_ob_to_questionary_number(self): """ Check converting a python dict to a pyenquirer format - with enum """ sc_obj = {"type": "number", "default": 0.1} - result = self.launcher.single_param_to_pyinquirer("min_reps_consensus", sc_obj) + result = self.launcher.single_param_to_questionary("min_reps_consensus", sc_obj) assert result["type"] == "input" assert result["default"] == "0.1" assert result["validate"]("123") is True @@ -232,10 +232,10 @@ 
def test_ob_to_pyinquirer_number(self): assert result["filter"]("123.456") == float(123.456) assert result["filter"]("") == "" - def test_ob_to_pyinquirer_integer(self): + def test_ob_to_questionary_integer(self): """ Check converting a python dict to a pyenquirer format - with enum """ sc_obj = {"type": "integer", "default": 1} - result = self.launcher.single_param_to_pyinquirer("broad_cutoff", sc_obj) + result = self.launcher.single_param_to_questionary("broad_cutoff", sc_obj) assert result["type"] == "input" assert result["default"] == "1" assert result["validate"]("123") is True @@ -246,10 +246,10 @@ def test_ob_to_pyinquirer_integer(self): assert result["filter"]("123") == int(123) assert result["filter"]("") == "" - def test_ob_to_pyinquirer_range(self): + def test_ob_to_questionary_range(self): """ Check converting a python dict to a pyenquirer format - with enum """ - sc_obj = {"type": "range", "minimum": "10", "maximum": "20", "default": 15} - result = self.launcher.single_param_to_pyinquirer("broad_cutoff", sc_obj) + sc_obj = {"type": "number", "minimum": "10", "maximum": "20", "default": 15} + result = self.launcher.single_param_to_questionary("broad_cutoff", sc_obj) assert result["type"] == "input" assert result["default"] == "15" assert result["validate"]("20") is True @@ -260,21 +260,18 @@ def test_ob_to_pyinquirer_range(self): assert result["filter"]("20") == float(20) assert result["filter"]("") == "" - def test_ob_to_pyinquirer_enum(self): - """ Check converting a python dict to a pyenquirer format - with enum """ + def test_ob_to_questionary_enum(self): + """ Check converting a python dict to a questionary format - with enum """ sc_obj = {"type": "string", "default": "copy", "enum": ["symlink", "rellink"]} - result = self.launcher.single_param_to_pyinquirer("publish_dir_mode", sc_obj) + result = self.launcher.single_param_to_questionary("publish_dir_mode", sc_obj) assert result["type"] == "list" assert result["default"] == "copy" assert 
result["choices"] == ["symlink", "rellink"] - assert result["validate"]("symlink") is True - assert result["validate"]("") is True - assert result["validate"]("not_allowed") == "Must be one of: symlink, rellink" - def test_ob_to_pyinquirer_pattern(self): - """ Check converting a python dict to a pyenquirer format - with pattern """ + def test_ob_to_questionary_pattern(self): + """ Check converting a python dict to a questionary format - with pattern """ sc_obj = {"type": "string", "pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$"} - result = self.launcher.single_param_to_pyinquirer("email", sc_obj) + result = self.launcher.single_param_to_questionary("email", sc_obj) assert result["type"] == "input" assert result["validate"]("test@email.com") is True assert result["validate"]("") is True