diff --git a/HISTORY.rst b/HISTORY.rst index d63ec2854..d212152db 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -6,11 +6,28 @@ History .. to_doc --------------------- -0.55.0.dev0 +0.56.0.dev0 --------------------- +--------------------- +0.55.0 (2018-09-12) +--------------------- + +* Add commands to create Galaxy training materials (thanks to `@bebatut`_). + `Pull Request 861`_ +* Fix `planemo test` when TEMP env variable contain spaces (thanks to + `@nsoranzo`_). + `Pull Request 851`_ +* Support testing a completely remote galaxy instance (thanks to `@erasche`_). + `Pull Request 856`_ +* Allow naming history from command line (thanks to `@erasche`_). + `Pull Request 860`_ +* Sync galaxy.xsd from galaxy repo (thanks to `@nsoranzo`_). + `Pull Request 866`_ +* Fix ServeTestCase.test_shed_serve test (thanks to `@nsoranzo`). bad810a_ + --------------------- 0.54.0 (2018-06-06) --------------------- @@ -1357,6 +1374,12 @@ History tools - and more experimental features involving Docker and Homebrew. 7d07782_ .. github_links +.. _bad810a: https://github.com/galaxyproject/planemo/commit/bad810a +.. _Pull Request 851: https://github.com/galaxyproject/planemo/pull/851 +.. _Pull Request 856: https://github.com/galaxyproject/planemo/pull/856 +.. _Pull Request 860: https://github.com/galaxyproject/planemo/pull/860 +.. _Pull Request 866: https://github.com/galaxyproject/planemo/pull/866 +.. _Pull Request 861: https://github.com/galaxyproject/planemo/pull/861 .. _324c776: https://github.com/galaxyproject/planemo/commit/324c776 .. _72d2ca7: https://github.com/galaxyproject/planemo/commit/72d2ca7 .. _b12b117: https://github.com/galaxyproject/planemo/commit/b12b117 @@ -1949,3 +1972,4 @@ History .. _@dfornika: https://github.com/dfornika .. _@bernt-matthias: https://github.com/bernt-matthias .. _@katrinleinweber: https://github.com/katrinleinweber +.. 
_@bebatut: https://github.com/bebatut diff --git a/docs/commands.rst b/docs/commands.rst index 6fea08d5e..f62c172c0 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -23,7 +23,6 @@ documentation describes these commands. .. include:: commands/conda_search.rst .. include:: commands/config_init.rst .. include:: commands/container_register.rst -.. include:: commands/cwl_script.rst .. include:: commands/database_create.rst .. include:: commands/database_delete.rst .. include:: commands/database_list.rst @@ -41,7 +40,6 @@ documentation describes these commands. .. include:: commands/profile_list.rst .. include:: commands/project_init.rst .. include:: commands/pull_request.rst -.. include:: commands/run.rst .. include:: commands/serve.rst .. include:: commands/share_test.rst .. include:: commands/shed_build.rst @@ -58,6 +56,9 @@ documentation describes these commands. .. include:: commands/test_reports.rst .. include:: commands/tool_factory.rst .. include:: commands/tool_init.rst +.. include:: commands/training_fill_data_library.rst +.. include:: commands/training_generate_from_wf.rst +.. include:: commands/training_init.rst .. include:: commands/travis_init.rst .. include:: commands/virtualenv.rst .. include:: commands/workflow_convert.rst \ No newline at end of file diff --git a/docs/commands/training_fill_data_library.rst b/docs/commands/training_fill_data_library.rst new file mode 100644 index 000000000..f9d141429 --- /dev/null +++ b/docs/commands/training_fill_data_library.rst @@ -0,0 +1,27 @@ + +``training_fill_data_library`` command +====================================== + +This section is auto-generated from the help text for the planemo command +``training_fill_data_library``. This help message can be generated with ``planemo training_fill_data_library +--help``. + +**Usage**:: + + planemo training_fill_data_library [OPTIONS] TOOL_PATH + +**Help** + +Build training template from workflow. 
+**Options**:: + + + --topic_name TEXT Name (directory name) of the topic to create or in which + a tutorial should be created or updates [required] + --tutorial_name TEXT Name (directory name) of the tutorial to modify + [required] + --zenodo_link TEXT Zenodo URL with the input data + --datatypes PATH YAML file with the correspondance between Zenodo + extension and Galaxy datatypes + --help Show this message and exit. + diff --git a/docs/commands/training_generate_from_wf.rst b/docs/commands/training_generate_from_wf.rst new file mode 100644 index 000000000..d9659978a --- /dev/null +++ b/docs/commands/training_generate_from_wf.rst @@ -0,0 +1,171 @@ + +``training_generate_from_wf`` command +====================================== + +This section is auto-generated from the help text for the planemo command +``training_generate_from_wf``. This help message can be generated with ``planemo training_generate_from_wf +--help``. + +**Usage**:: + + planemo training_generate_from_wf [OPTIONS] TOOL_PATH + +**Help** + +Create tutorial skeleton from workflow. +**Options**:: + + + --topic_name TEXT Name (directory name) of the topic to create + or in which a tutorial should be created or + updates [required] + --tutorial_name TEXT Name (directory name) of the tutorial to + modify [required] + --workflow PATH Workflow of the tutorial (locally) + --galaxy_url TEXT URL of a Galaxy instance with the workflow + --galaxy_api_key TEXT API key on the Galaxy instance with the + workflow + --workflow_id TEXT ID of the workflow on the Galaxy instance + --galaxy_root DIRECTORY Root of development galaxy directory to + execute command with. + --galaxy_database_seed PATH Preseeded Galaxy sqlite database to target. + --extra_tools PATH Extra tool sources to include in Galaxy's tool + panel (file or directory). These will not be + linted/tested/etc... but they will be + available to workflows and for interactive + use. 
+ --install_galaxy Download and configure a disposable copy of + Galaxy from github. + --galaxy_branch TEXT Branch of Galaxy to target (defaults to + master) if a Galaxy root isn't specified. + --galaxy_source TEXT Git source of Galaxy to target (defaults to + the official galaxyproject github source if a + Galaxy root isn't specified. + --skip_venv Do not create or source a virtualenv + environment for Galaxy, this should be used or + instance to preserve an externally configured + virtual environment or conda environment. + --no_cache_galaxy Skip caching of Galaxy source and dependencies + obtained with --install_galaxy. Not caching + this results in faster downloads (no git) - so + is better on throw away instances such with + TravisCI. + --no_cleanup Do not cleanup temp files created for and by + Galaxy. + --galaxy_email TEXT E-mail address to use when launching single- + user Galaxy server. + --docker / --no_docker Run Galaxy tools in Docker if enabled. + --docker_cmd TEXT Command used to launch docker (defaults to + docker). + --docker_sudo / --no_docker_sudo + Flag to use sudo when running docker. + --docker_host TEXT Docker host to target when executing docker + commands (defaults to localhost). + --docker_sudo_cmd TEXT sudo command to use when --docker_sudo is + enabled (defaults to sudo). + --mulled_containers, --biocontainers + Test tools against mulled containers (forces + --docker). + --job_config_file PATH Job configuration file for Galaxy to target. + --tool_dependency_dir DIRECTORY + Tool dependency dir for Galaxy to target. + --port INTEGER Port to serve Galaxy on (default is 9090). + --host TEXT Host to bind Galaxy to. Default is 127.0.0.1 + that is restricted to localhost connections + for security reasons set to 0.0.0.0 to bind + Galaxy to all ports including potentially + publicly accessible ones. + --engine [galaxy|docker_galaxy|external_galaxy] + Select an engine to serve aritfacts such as + tools and workflows. 
Defaults to a local + Galaxy, but running Galaxy within a Docker + container. + --non_strict_cwl Disable strict validation of CWL. + --docker_galaxy_image TEXT Docker image identifier for docker-galaxy- + flavor used if engine type is specified as + ``docker-galaxy``. Defaults to + quay.io/bgruening/galaxy. + --docker_extra_volume PATH Extra path to mount if --engine docker. + --test_data DIRECTORY test-data directory to for specified tool(s). + --tool_data_table PATH tool_data_table_conf.xml file to for specified + tool(s). + --dependency_resolvers_config_file PATH + Dependency resolver configuration for Galaxy + to target. + --brew_dependency_resolution Configure Galaxy to use plain brew dependency + resolution. + --shed_dependency_resolution Configure Galaxy to use brewed Tool Shed + dependency resolution. + --no_dependency_resolution Configure Galaxy with no dependency resolvers. + --conda_prefix DIRECTORY Conda prefix to use for conda dependency + commands. + --conda_exec PATH Location of conda executable. + --conda_debug Enable more verbose conda logging. + --conda_channels, --conda_ensure_channels TEXT + Ensure conda is configured with specified + comma separated list of channels. + --conda_use_local Use locally built packages while building + Conda environments. + --conda_dependency_resolution Configure Galaxy to use only conda for + dependency resolution. + --conda_copy_dependencies Conda dependency resolution for Galaxy will + copy dependencies instead of attempting to + link them. + --conda_auto_install / --no_conda_auto_install + Conda dependency resolution for Galaxy will + attempt to install requested but missing + packages. + --conda_auto_init / --no_conda_auto_init + Conda dependency resolution for Galaxy will + auto install conda itself using miniconda if + not availabe on conda_prefix. + --profile TEXT Name of profile (created with the + profile_create command) to use with this + command. + --postgres Use postgres database type. 
+ --database_type [postgres|postgres_docker|sqlite|auto] + Type of database to use for profile - 'auto', + 'sqlite', 'postgres', and 'postgres_docker' + are available options. Use postgres to use an + existing postgres server you user can access + without a password via the psql command. Use + postgres_docker to have Planemo manage a + docker container running postgres. Data with + postgres_docker is not yet persisted past when + you restart the docker container launched by + Planemo so be careful with this option. + --postgres_psql_path TEXT Name or or path to postgres client binary + (psql). + --postgres_database_user TEXT Postgres username for managed development + databases. + --postgres_database_host TEXT Postgres host name for managed development + databases. + --postgres_database_port TEXT Postgres port for managed development + databases. + --file_path DIRECTORY Location for files created by Galaxy (e.g. + database/files). + --database_connection TEXT Database connection string to use for Galaxy. + --shed_tool_conf TEXT Location of shed tools conf file for Galaxy. + --shed_tool_path TEXT Location of shed tools directory for Galaxy. + --galaxy_single_user / --no_galaxy_single_user + By default Planemo will configure Galaxy to + run in single-user mode where there is just + one user and this user is automatically logged + it. Use --no_galaxy_single_user to prevent + Galaxy from running this way. + --daemon Serve Galaxy process as a daemon. + --pid_file PATH Location of pid file is executed with + --daemon. + --ignore_dependency_problems When installing shed repositories for + workflows, ignore dependency issues. These + likely indicate a problem but in some cases + may not prevent a workflow from successfully + executing. + --shed_install / --no_shed_install + By default Planemo will attempt to install + repositories needed for workflow testing. 
This + may not be appropriate for production servers + and so this can disabled by calling planemo + with --no_shed_install. + --help Show this message and exit. + diff --git a/docs/commands/training_init.rst b/docs/commands/training_init.rst new file mode 100644 index 000000000..b26df02db --- /dev/null +++ b/docs/commands/training_init.rst @@ -0,0 +1,182 @@ + +``training_init`` command +====================================== + +This section is auto-generated from the help text for the planemo command +``training_init``. This help message can be generated with ``planemo training_init +--help``. + +**Usage**:: + + planemo training_init [OPTIONS] TOOL_PATH + +**Help** + +Build training template from workflow. +**Options**:: + + + --topic_name TEXT Name (directory name) of the topic to create + or in which a tutorial should be created or + updates [required] + --topic_title TEXT Title of the topic to create + --topic_summary TEXT Summary of the topic + --topic_target [use|admin-dev|instructors] + Target audience for the topic + --templates PATH Directory with the training templates + --tutorial_name TEXT Name (directory name) of the tutorial to + create or to modify + --tutorial_title TEXT Title of the tutorial + --hands_on Add hands-on for the new tutorial + --slides Add slides for the new tutorial + --workflow PATH Workflow of the tutorial (locally) + --galaxy_url TEXT URL of a Galaxy instance with the workflow + --galaxy_api_key TEXT API key on the Galaxy instance with the + workflow + --workflow_id TEXT ID of the workflow on the Galaxy instance + --zenodo_link TEXT Zenodo URL with the input data + --datatypes PATH YAML file with the correspondance between + Zenodo extension and Galaxy datatypes + --galaxy_root DIRECTORY Root of development galaxy directory to + execute command with. + --galaxy_database_seed PATH Preseeded Galaxy sqlite database to target. + --extra_tools PATH Extra tool sources to include in Galaxy's tool + panel (file or directory). 
These will not be + linted/tested/etc... but they will be + available to workflows and for interactive + use. + --install_galaxy Download and configure a disposable copy of + Galaxy from github. + --galaxy_branch TEXT Branch of Galaxy to target (defaults to + master) if a Galaxy root isn't specified. + --galaxy_source TEXT Git source of Galaxy to target (defaults to + the official galaxyproject github source if a + Galaxy root isn't specified. + --skip_venv Do not create or source a virtualenv + environment for Galaxy, this should be used or + instance to preserve an externally configured + virtual environment or conda environment. + --no_cache_galaxy Skip caching of Galaxy source and dependencies + obtained with --install_galaxy. Not caching + this results in faster downloads (no git) - so + is better on throw away instances such with + TravisCI. + --no_cleanup Do not cleanup temp files created for and by + Galaxy. + --galaxy_email TEXT E-mail address to use when launching single- + user Galaxy server. + --docker / --no_docker Run Galaxy tools in Docker if enabled. + --docker_cmd TEXT Command used to launch docker (defaults to + docker). + --docker_sudo / --no_docker_sudo + Flag to use sudo when running docker. + --docker_host TEXT Docker host to target when executing docker + commands (defaults to localhost). + --docker_sudo_cmd TEXT sudo command to use when --docker_sudo is + enabled (defaults to sudo). + --mulled_containers, --biocontainers + Test tools against mulled containers (forces + --docker). + --job_config_file PATH Job configuration file for Galaxy to target. + --tool_dependency_dir DIRECTORY + Tool dependency dir for Galaxy to target. + --port INTEGER Port to serve Galaxy on (default is 9090). + --host TEXT Host to bind Galaxy to. Default is 127.0.0.1 + that is restricted to localhost connections + for security reasons set to 0.0.0.0 to bind + Galaxy to all ports including potentially + publicly accessible ones. 
+ --engine [galaxy|docker_galaxy|external_galaxy] + Select an engine to serve aritfacts such as + tools and workflows. Defaults to a local + Galaxy, but running Galaxy within a Docker + container. + --non_strict_cwl Disable strict validation of CWL. + --docker_galaxy_image TEXT Docker image identifier for docker-galaxy- + flavor used if engine type is specified as + ``docker-galaxy``. Defaults to + quay.io/bgruening/galaxy. + --docker_extra_volume PATH Extra path to mount if --engine docker. + --test_data DIRECTORY test-data directory to for specified tool(s). + --tool_data_table PATH tool_data_table_conf.xml file to for specified + tool(s). + --dependency_resolvers_config_file PATH + Dependency resolver configuration for Galaxy + to target. + --brew_dependency_resolution Configure Galaxy to use plain brew dependency + resolution. + --shed_dependency_resolution Configure Galaxy to use brewed Tool Shed + dependency resolution. + --no_dependency_resolution Configure Galaxy with no dependency resolvers. + --conda_prefix DIRECTORY Conda prefix to use for conda dependency + commands. + --conda_exec PATH Location of conda executable. + --conda_debug Enable more verbose conda logging. + --conda_channels, --conda_ensure_channels TEXT + Ensure conda is configured with specified + comma separated list of channels. + --conda_use_local Use locally built packages while building + Conda environments. + --conda_dependency_resolution Configure Galaxy to use only conda for + dependency resolution. + --conda_copy_dependencies Conda dependency resolution for Galaxy will + copy dependencies instead of attempting to + link them. + --conda_auto_install / --no_conda_auto_install + Conda dependency resolution for Galaxy will + attempt to install requested but missing + packages. + --conda_auto_init / --no_conda_auto_init + Conda dependency resolution for Galaxy will + auto install conda itself using miniconda if + not availabe on conda_prefix. 
+ --profile TEXT Name of profile (created with the + profile_create command) to use with this + command. + --postgres Use postgres database type. + --database_type [postgres|postgres_docker|sqlite|auto] + Type of database to use for profile - 'auto', + 'sqlite', 'postgres', and 'postgres_docker' + are available options. Use postgres to use an + existing postgres server you user can access + without a password via the psql command. Use + postgres_docker to have Planemo manage a + docker container running postgres. Data with + postgres_docker is not yet persisted past when + you restart the docker container launched by + Planemo so be careful with this option. + --postgres_psql_path TEXT Name or or path to postgres client binary + (psql). + --postgres_database_user TEXT Postgres username for managed development + databases. + --postgres_database_host TEXT Postgres host name for managed development + databases. + --postgres_database_port TEXT Postgres port for managed development + databases. + --file_path DIRECTORY Location for files created by Galaxy (e.g. + database/files). + --database_connection TEXT Database connection string to use for Galaxy. + --shed_tool_conf TEXT Location of shed tools conf file for Galaxy. + --shed_tool_path TEXT Location of shed tools directory for Galaxy. + --galaxy_single_user / --no_galaxy_single_user + By default Planemo will configure Galaxy to + run in single-user mode where there is just + one user and this user is automatically logged + it. Use --no_galaxy_single_user to prevent + Galaxy from running this way. + --daemon Serve Galaxy process as a daemon. + --pid_file PATH Location of pid file is executed with + --daemon. + --ignore_dependency_problems When installing shed repositories for + workflows, ignore dependency issues. These + likely indicate a problem but in some cases + may not prevent a workflow from successfully + executing. 
+ --shed_install / --no_shed_install + By default Planemo will attempt to install + repositories needed for workflow testing. This + may not be appropriate for production servers + and so this can disabled by calling planemo + with --no_shed_install. + --help Show this message and exit. + diff --git a/docs/organization.rst b/docs/organization.rst index 0aace800b..a4a4c5eb8 100644 --- a/docs/organization.rst +++ b/docs/organization.rst @@ -23,7 +23,7 @@ is currently too small to support full and open governance at this time. In order to keep things evolving quickly, it is better to keep procedures and process to a minimum and centralize important decisions with a trusted developer. The BDFN is explicitly meant to be replaced with a more formal and -democratice process if the project grows to a sufficient size or importance. +democratic process if the project grows to a sufficient size or importance. The *committers* group is the group of trusted developers and advocates who manage the Planemo code base. They assume many roles required to achieve @@ -44,6 +44,7 @@ Committers ============================== - Dannon Baker (@dannon) +- Bérénice Batut (@bebatut) - Martin Cech (@martenson) - John Chilton (@jmchilton) - Peter Cock (@peterjc) diff --git a/docs/planemo.commands.rst b/docs/planemo.commands.rst index c034307fa..25b5adcf6 100644 --- a/docs/planemo.commands.rst +++ b/docs/planemo.commands.rst @@ -428,6 +428,30 @@ planemo.commands.cmd\_tool\_init module :undoc-members: :show-inheritance: +planemo.commands.cmd\_training\_fill\_data\_library module +---------------------------------------------------------- + +.. automodule:: planemo.commands.cmd_training_fill_data_library + :members: + :undoc-members: + :show-inheritance: + +planemo.commands.cmd\_training\_generate\_from\_wf module +--------------------------------------------------------- + +.. 
automodule:: planemo.commands.cmd_training_generate_from_wf + :members: + :undoc-members: + :show-inheritance: + +planemo.commands.cmd\_training\_init module +------------------------------------------- + +.. automodule:: planemo.commands.cmd_training_init + :members: + :undoc-members: + :show-inheritance: + planemo.commands.cmd\_travis\_before\_install module ---------------------------------------------------- diff --git a/docs/planemo.rst b/docs/planemo.rst index b74831ab4..61b8ccbab 100644 --- a/docs/planemo.rst +++ b/docs/planemo.rst @@ -18,6 +18,7 @@ Subpackages planemo.shed planemo.shed2tap planemo.test + planemo.training planemo.xml Submodules diff --git a/docs/planemo.training.rst b/docs/planemo.training.rst new file mode 100644 index 000000000..e8829d83f --- /dev/null +++ b/docs/planemo.training.rst @@ -0,0 +1,46 @@ +planemo.training package +======================== + +Submodules +---------- + +planemo.training.tool\_input module +----------------------------------- + +.. automodule:: planemo.training.tool_input + :members: + :undoc-members: + :show-inheritance: + +planemo.training.topic module +----------------------------- + +.. automodule:: planemo.training.topic + :members: + :undoc-members: + :show-inheritance: + +planemo.training.tutorial module +-------------------------------- + +.. automodule:: planemo.training.tutorial + :members: + :undoc-members: + :show-inheritance: + +planemo.training.utils module +----------------------------- + +.. automodule:: planemo.training.utils + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: planemo.training + :members: + :undoc-members: + :show-inheritance: diff --git a/planemo/__init__.py b/planemo/__init__.py index 0bc0fad36..af18da829 100644 --- a/planemo/__init__.py +++ b/planemo/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -__version__ = '0.55.0.dev0' +__version__ = '0.56.0.dev0' PROJECT_NAME = "planemo" diff --git a/planemo/commands/cmd_test.py b/planemo/commands/cmd_test.py index 97d5092e6..01e602986 100644 --- a/planemo/commands/cmd_test.py +++ b/planemo/commands/cmd_test.py @@ -28,6 +28,10 @@ "previously.", default=False, ) +@click.option( + "--history_name", + help="Name for history (if a history is generated as part of testing.)" +) @options.galaxy_target_options() @options.galaxy_config_options() @options.test_options() diff --git a/planemo/commands/cmd_training_fill_data_library.py b/planemo/commands/cmd_training_fill_data_library.py new file mode 100644 index 000000000..d0d03db1a --- /dev/null +++ b/planemo/commands/cmd_training_fill_data_library.py @@ -0,0 +1,17 @@ +"""Module describing the planemo ``training_fill_data_library`` command.""" +import click + +from planemo import options +from planemo.cli import command_function +from planemo.training import Training + + +@click.command('training_fill_data_library') +@options.optional_tools_arg(multiple=True, allow_uris=True) +@options.training_fill_data_library_options() +@command_function +def cli(ctx, uris, **kwds): + """Build training template from workflow.""" + kwds["no_dependency_resolution"] = True + training = Training(kwds) + training.fill_data_library(ctx) diff --git a/planemo/commands/cmd_training_generate_from_wf.py b/planemo/commands/cmd_training_generate_from_wf.py new file mode 100644 index 000000000..8896de3dd --- /dev/null +++ b/planemo/commands/cmd_training_generate_from_wf.py @@ -0,0 +1,19 @@ +"""Module describing the planemo ``training_generate_from_wf`` command.""" + +import click + +from planemo import options +from planemo.cli import 
command_function +from planemo.training import Training + + +@click.command('training_generate_from_wf') +@options.optional_tools_arg(multiple=True, allow_uris=True) +@options.training_generate_tuto_from_wf_options() +@options.galaxy_serve_options() +@command_function +def cli(ctx, uris, **kwds): + """Create tutorial skeleton from workflow.""" + kwds["no_dependency_resolution"] = True + training = Training(kwds) + training.generate_tuto_from_wf(ctx) diff --git a/planemo/commands/cmd_training_init.py b/planemo/commands/cmd_training_init.py new file mode 100644 index 000000000..bb105ab0d --- /dev/null +++ b/planemo/commands/cmd_training_init.py @@ -0,0 +1,20 @@ +"""Module describing the planemo ``training_init`` command.""" + +import click + +from planemo import options +from planemo.cli import command_function +from planemo.training import Training + + +@click.command('training_init') +@options.optional_tools_arg(multiple=True, allow_uris=True) +@options.training_init_options() +# @options.force_option() +@options.galaxy_serve_options() +@command_function +def cli(ctx, uris, **kwds): + """Build training template from workflow.""" + kwds["no_dependency_resolution"] = True + training = Training(kwds) + training.init_training(ctx) diff --git a/planemo/options.py b/planemo/options.py index 95da73d3b..9dbec8f64 100644 --- a/planemo/options.py +++ b/planemo/options.py @@ -1123,6 +1123,130 @@ def galaxy_serve_options(): ) +def training_topic_name_option(): + return planemo_option( + "--topic_name", + required=True, + help="Name (directory name) of the topic to create or in which " + "a tutorial should be created or updates" + ) + + +def training_topic_option(): + return _compose( + training_topic_name_option(), + planemo_option( + "--topic_title", + default="Title of the topic", + help="Title of the topic to create"), + planemo_option( + "--topic_summary", + default="Summary of the topic", + help="Summary of the topic"), + planemo_option( + "--topic_target", + 
type=click.Choice(['use', 'admin-dev', 'instructors']), + default="use", + help="Target audience for the topic") + ) + + +def training_tutorial_name_option(): + return planemo_option( + "--tutorial_name", + help="Name (directory name) of the tutorial to create or to modify" + ) + + +def training_tutorial_name_req_option(): + return planemo_option( + "--tutorial_name", + required=True, + help="Name (directory name) of the tutorial to modify" + ) + + +def training_datatype_option(): + return planemo_option( + "--datatypes", + type=click.Path(file_okay=True, resolve_path=True), + help="YAML file with the correspondance between Zenodo extension and Galaxy datatypes", + default="shared/datatypes.yaml" + ) + + +def training_zenodo_option(): + return planemo_option( + "--zenodo_link", + help="Zenodo URL with the input data") + + +def training_tutorial_worflow_option(): + return _compose( + planemo_option( + "--workflow", + type=click.Path(file_okay=True, resolve_path=True), + help="Workflow of the tutorial (locally)", + default=None), + planemo_option( + "--galaxy_url", + help="URL of a Galaxy instance with the workflow"), + planemo_option( + "--galaxy_api_key", + help="API key on the Galaxy instance with the workflow"), + planemo_option( + "--workflow_id", + help="ID of the workflow on the Galaxy instance") + ) + + +def training_tutorial_option(): + return _compose( + training_tutorial_name_option(), + planemo_option( + "--tutorial_title", + default="Title of the tutorial", + help="Title of the tutorial"), + planemo_option( + "--hands_on", + is_flag=True, + default=True, + help="Add hands-on for the new tutorial"), + planemo_option( + "--slides", + is_flag=True, + default=False, + help="Add slides for the new tutorial"), + training_tutorial_worflow_option(), + training_zenodo_option() + ) + + +def training_init_options(): + return _compose( + training_topic_option(), + training_tutorial_option(), + training_datatype_option() + ) + + +def 
training_fill_data_library_options(): + return _compose( + training_topic_name_option(), + training_tutorial_name_req_option(), + training_zenodo_option(), + training_datatype_option() + ) + + +def training_generate_tuto_from_wf_options(): + return _compose( + training_topic_name_option(), + training_tutorial_name_req_option(), + training_tutorial_worflow_option() + ) + + def shed_fail_fast_option(): return planemo_option( "--fail_fast", diff --git a/planemo/training/__init__.py b/planemo/training/__init__.py new file mode 100644 index 000000000..971734a74 --- /dev/null +++ b/planemo/training/__init__.py @@ -0,0 +1,94 @@ +"""Module contains code for gtdk: Galaxy training development kit.""" + +from planemo.io import info +from .topic import Topic +from .tutorial import Tutorial + + +class Training: + """Class to describe a training.""" + + def __init__(self, kwds): + """Init an instance of Training.""" + self.kwds = kwds + self.topics_dir = "topics" + self.topic = Topic(parent_dir=self.topics_dir, name=kwds['topic_name']) + self.galaxy_url = kwds['galaxy_url'] if 'galaxy_url' in kwds else '' + self.galaxy_api_key = kwds['galaxy_api_key'] if 'galaxy_api_key' in kwds else '' + self.tuto = None + + def init_training(self, ctx): + """Create/update a topic/tutorial.""" + if not self.topic.exists(): + info("The topic %s does not exist. 
It will be created" % self.topic.name) + self.topic.init_from_kwds(self.kwds) + self.topic.create_topic_structure() + + if not self.kwds['tutorial_name']: + if self.kwds["slides"]: + raise Exception("A tutorial name is needed to create the skeleton of a tutorial slide deck") + if self.kwds['workflow'] or self.kwds['workflow_id']: + raise Exception("A tutorial name is needed to create the skeleton of the tutorial from a workflow") + if self.kwds['zenodo_link']: + raise Exception("A tutorial name is needed to add Zenodo information") + else: + self.tuto = Tutorial(training=self, topic=self.topic) + self.tuto.init_from_kwds(self.kwds) + if not self.tuto.exists(): + info("The tutorial %s in topic %s does not exist. It will be created." % (self.tuto.name, self.topic.name)) + self.tuto.create_tutorial(ctx) + + def check_topic_init_tuto(self): + """Check that the topic and tutorial are already there and retrieve them.""" + # check topic + if not self.topic.exists(): + raise Exception("The topic %s does not exists. 
It should be created" % self.topic.name) + self.topic.init_from_metadata() + # initiate the tutorial + self.tuto = Tutorial(training=self, topic=self.topic) + self.tuto.init_from_existing_tutorial(self.kwds['tutorial_name']) + if 'datatypes' in self.kwds: + self.tuto.datatype_fp = self.kwds['datatypes'] + if 'workflow' in self.kwds: + self.tuto.init_wf_fp = self.kwds['workflow'] + if 'workflow_id' in self.kwds: + self.tuto.init_wf_id = self.kwds['workflow_id'] + + def fill_data_library(self, ctx): + """Fill a data library for a tutorial.""" + self.check_topic_init_tuto() + # get the zenodo link + z_link = '' + if self.tuto.zenodo_link != '': + if self.kwds['zenodo_link']: + info("The data library and the metadata will be updated with the new Zenodo link") + z_link = self.kwds['zenodo_link'] + self.tuto.zenodo_link = z_link + else: + info("The data library will be extracted using the Zenodo link in the metadata of the tutorial") + z_link = self.tuto.zenodo_link + elif self.kwds['zenodo_link']: + info("The data library will be created and the metadata will be filled with the new Zenodo link") + z_link = self.kwds['zenodo_link'] + self.tuto.zenodo_link = z_link + + if z_link == '' or z_link is None: + raise Exception("A Zenodo link should be provided either in the metadata file or as argument of the command") + + # extract the data library from Zenodo + self.tuto.prepare_data_library_from_zenodo() + + # update the metadata + self.tuto.write_hands_on_tutorial() + + def generate_tuto_from_wf(self, ctx): + """Generate the skeleton of a tutorial from a workflow.""" + self.check_topic_init_tuto() + if self.tuto.has_workflow(): + info("Create tutorial skeleton from workflow") + self.tuto.create_hands_on_tutorial(ctx) + self.tuto.export_workflow_file() + else: + raise Exception( + "A path to a local workflow or the id of a workflow on a running Galaxy instance should be provided" + ) diff --git a/planemo/training/tool_input.py b/planemo/training/tool_input.py new file mode 
100644 index 000000000..2c9475cc7 --- /dev/null +++ b/planemo/training/tool_input.py @@ -0,0 +1,234 @@ +"""Module contains code for the ToolInput class, dealing with the description of tool in workflow and XML.""" + +from planemo import templates +from planemo.io import info + + +INPUT_PARAM = """ +>{{space}}- *"{{param_label}}"*: `{{param_value}}` +""" + +INPUT_FILE_TEMPLATE = """ +>{{space}}- {{ '{%' }} icon {{icon}} {{ '%}' }} *"{{input_name}}"*: {{input_value}} +""" + +INPUT_SECTION = """ +>{{space}}- In *"{{section_label}}"*: +""" + +INPUT_ADD_REPEAT = """ +>{{space}}- Click on *"Insert {{repeat_label}}"*: +""" + +SPACE = ' ' + + +class ToolInput(): + """Class to describe a tool input / parameter and its value from a workflow.""" + + def __init__(self, tool_inp_desc, wf_param_values, wf_steps, level, should_be_there=False, force_default=False): + """Init an instance of ToolInput.""" + self.name = tool_inp_desc['name'] + if 'type' not in tool_inp_desc: + raise ValueError("No type for the parameter %s" % tool_inp_desc['name']) + self.type = tool_inp_desc['type'] + self.tool_inp_desc = tool_inp_desc + self.level = level + self.wf_param_values = wf_param_values + self.wf_steps = wf_steps + self.formatted_desc = '' + self.force_default = force_default + + if self.name not in self.wf_param_values: + if not should_be_there: + info("%s not in workflow" % self.name) + else: + raise ValueError("%s not in workflow" % self.name) + else: + self.wf_param_values = self.wf_param_values[self.name] + + def get_formatted_inputs(self): + """Format the inputs of a step.""" + inputlist = '' + inps = [] + if isinstance(self.wf_param_values, list): + # multiple input (not collection) + icon = 'param-files' + for i in self.wf_param_values: + inps.append('`%s` %s' % ( + i['output_name'], + get_input_tool_name(i['id'], self.wf_steps))) + else: + inp = self.wf_param_values + if 'id' in inp: + # sinle input or collection + inp_type = self.wf_steps[str(inp['id'])]['type'] + if 'collection' 
in inp_type: + icon = 'param-collection' + else: + icon = 'param-file' + inps = ['`%s` %s' % ( + inp['output_name'], + get_input_tool_name(inp['id'], self.wf_steps))] + if len(inps) > 0: + inputlist += templates.render(INPUT_FILE_TEMPLATE, **{ + "icon": icon, + "input_name": self.tool_inp_desc['label'], + "input_value": ', '.join(inps), + "space": SPACE * self.level + }) + return inputlist + + def get_lower_param_desc(self): + """Get the formatted description of the paramaters in the 'inputs' of the tool description.""" + sub_param_desc = '' + for inp in self.tool_inp_desc["inputs"]: + tool_inp = ToolInput( + inp, + self.wf_param_values, + self.wf_steps, + self.level + 1) + sub_param_desc += tool_inp.get_formatted_desc() + return sub_param_desc + + def get_formatted_section_desc(self): + """Format the description (label and value) for parameters in a section.""" + section_paramlist = '' + sub_param_desc = self.get_lower_param_desc() + if sub_param_desc != '': + section_paramlist += templates.render(INPUT_SECTION, **{ + 'space': SPACE * self.level, + 'section_label': self.tool_inp_desc['title']}) + section_paramlist += sub_param_desc + return section_paramlist + + def get_formatted_conditional_desc(self): + """Format the description (label and value) for parameters in a conditional.""" + conditional_paramlist = '' + # Get conditional parameter + inp = ToolInput( + self.tool_inp_desc['test_param'], + self.wf_param_values, + self.wf_steps, + self.level, + should_be_there=True, + force_default=True) + conditional_paramlist = inp.get_formatted_desc() + cond_param = inp.wf_param_values + + # Get parameters in the when and their values + tmp_tool_inp_desc = self.tool_inp_desc + for case in tmp_tool_inp_desc['cases']: + if case['value'] == cond_param and len(case['inputs']) > 0: + self.tool_inp_desc = case + conditional_paramlist += self.get_lower_param_desc() + self.tool_inp_desc = tmp_tool_inp_desc + return conditional_paramlist + + def get_formatted_repeat_desc(self): + 
"""Format the description (label and value) for parameters in a repeat.""" + tool_inp = {} + for inp in self.tool_inp_desc["inputs"]: + tool_inp.setdefault(inp['name'], inp) + repeat_paramlist = '' + tmp_wf_param_values = self.wf_param_values + cur_level = self.level + for ind, param in enumerate(tmp_wf_param_values): + self.wf_param_values = param + self.level = cur_level + 1 + paramlist_in_repeat = self.get_lower_param_desc() + if paramlist_in_repeat != '': + # add first click + repeat_paramlist += templates.render(INPUT_ADD_REPEAT, **{ + 'space': SPACE * (self.level), + 'repeat_label': self.tool_inp_desc['title']}) + # add description of parameters in the repeat + repeat_paramlist += templates.render(INPUT_SECTION, **{ + 'space': SPACE * (self.level), + 'section_label': "%s: %s" % (ind+1, self.tool_inp_desc['title'])}) + repeat_paramlist += paramlist_in_repeat + self.level = cur_level + self.wf_param_values = tmp_wf_param_values + + repeat_desc = '' + if repeat_paramlist != '': + repeat_desc += templates.render(INPUT_SECTION, **{ + 'space': SPACE * self.level, + 'section_label': self.tool_inp_desc['title']}) + repeat_paramlist + return repeat_desc + + def get_formatted_other_param_desc(self): + """Get value of a 'simple' parameter if different from the default value, None otherwise.""" + param_value = None + if self.tool_inp_desc['value'] == self.wf_param_values and not self.force_default: + param_value = None + elif self.type == 'boolean': + if bool(self.tool_inp_desc['value']) == self.wf_param_values: + param_value = None + else: + param_value = 'Yes' if self.wf_param_values else 'No' + elif self.type == 'select': + param_values = [] + for opt in self.tool_inp_desc['options']: + if opt[1] == self.wf_param_values: + param_values.append(opt[0]) + param_value = ', '.join(param_values) + elif self.type == 'data_column': + param_value = "c%s" % self.wf_param_values + else: + param_value = self.wf_param_values + + param_desc = '' + if param_value is not None: + 
param_desc = templates.render(INPUT_PARAM, **{ + 'space': SPACE * self.level, + 'param_label': self.tool_inp_desc['label'], + 'param_value': param_value}) + return param_desc + + def get_formatted_desc(self): + """Get the formatted description (ready for hands-on tutorial) of the parameter.""" + if self.wf_param_values: + if self.type == 'data' or self.type == 'data_collection': + self.formatted_desc += self.get_formatted_inputs() + elif self.type == 'section': + self.formatted_desc += self.get_formatted_section_desc() + elif self.type == 'conditional': + self.formatted_desc += self.get_formatted_conditional_desc() + elif self.type == 'repeat': + self.formatted_desc += self.get_formatted_repeat_desc() + else: + self.formatted_desc += self.get_formatted_other_param_desc() + return self.formatted_desc + + +def get_input_tool_name(step_id, steps): + """Get the string with the name of the tool that generated an input.""" + inp_provenance = '' + inp_prov_id = str(step_id) + if inp_prov_id in steps: + name = steps[inp_prov_id]['name'] + if 'Input dataset' in name: + inp_provenance = "(%s)" % name + else: + inp_provenance = "(output of **%s** {%% icon tool %%})" % name + return inp_provenance + + +def get_empty_input(): + """Get the string for an empty input.""" + return templates.render(INPUT_FILE_TEMPLATE, **{ + 'space': 1*SPACE, + 'icon': 'param-file', + 'input_name': 'Input file', + 'input_value': 'File' + }) + + +def get_empty_param(): + """Get the string for an empty param.""" + return templates.render(INPUT_PARAM, **{ + 'space': 1*SPACE, + 'param_label': 'Parameter', + 'param_value': 'a value' + }) diff --git a/planemo/training/topic.py b/planemo/training/topic.py new file mode 100644 index 000000000..1187dded6 --- /dev/null +++ b/planemo/training/topic.py @@ -0,0 +1,240 @@ +"""Module contains code for the Topic class, dealing with the creation of a training topic.""" + +import collections +import os + +from planemo import templates +from .utils import ( + 
load_yaml, + Reference, + Requirement, + save_to_yaml +) + + +INDEX_FILE_TEMPLATE = """--- +layout: topic +topic_name: {{ topic }} +--- +""" + +README_FILE_TEMPLATE = """ +{{ topic }} +========== + +Please refer to the [CONTRIBUTING.md](../../CONTRIBUTING.md) before adding or updating any material +""" + + +DOCKER_FILE_TEMPLATE = """ +# Galaxy - {{ topic_title }} +# +# to build the docker image, go to root of training repo and +# docker build -t {{ topic_name }} -f topics/{{ topic_name }}/docker/Dockerfile . +# +# to run image: +# docker run -p "8080:80" -t {{ topic_name }} + +FROM bgruening/galaxy-stable + +MAINTAINER Galaxy Training Material + +ENV GALAXY_CONFIG_BRAND "GTN: {{ topic_title }}" + +# prerequisites +RUN pip install ephemeris -U +ADD bin/galaxy-sleep.py /galaxy-sleep.py + +# copy the tutorials directory for your topic +ADD topics/{{ topic_name }}/tutorials/ /tutorials/ + +# install everything for tutorials +ADD bin/docker-install-tutorials.sh /setup-tutorials.sh +ADD bin/mergeyaml.py /mergeyaml.py +RUN /setup-tutorials.sh +""" + + +INTRO_SLIDES_FILE_TEMPLATE = """--- +layout: introduction_slides +logo: "GTN" + +title: {{ title }} +type: {{ type }} +contributors: +- contributor +--- + +### How to fill the slide decks? 
+ +Please follow our +[tutorial to learn how to fill the slides]({{ '{{' }} site.baseurl {{ '}}' }}/topics/contributing/tutorials/create-new-tutorial-slides/slides.html) +""" + + +class Topic: + """Class to describe a training topic.""" + + def __init__(self, name="new_topic", target="use", title="The new topic", summary="Summary", parent_dir="topics"): + """Init a topic instance.""" + self.name = name + self.type = target + self.title = title + self.summary = summary + self.docker_image = "" + self.maintainers = ["maintainers"] + self.parent_dir = parent_dir + self.set_default_requirement() + self.set_default_reference() + self.set_paths() + + def init_from_kwds(self, kwds): + """Init a topic instance from a kwds dictionary.""" + self.name = kwds["topic_name"] + self.type = kwds["topic_target"] + self.title = kwds["topic_title"] + self.summary = kwds["topic_summary"] + self.set_default_requirement() + self.set_default_reference() + self.set_paths() + + def init_from_metadata(self): + """Init a topic instance from the metadata file.""" + metadata = load_yaml(self.metadata_fp) + self.name = metadata['name'] + self.type = metadata['type'] + self.title = metadata['title'] + self.summary = metadata['summary'] + self.requirements = [] + for r in metadata['requirements']: + req = Requirement() + req.init_from_dict(r) + self.requirements.append(req) + if 'docker_image' in metadata: + self.docker_image = metadata['docker_image'] + self.maintainers = metadata['maintainers'] + self.references = [] + if 'references' in metadata: + for r in metadata['references']: + ref = Reference() + ref.init_from_dict(r) + self.references.append(ref) + self.set_paths() + + # GETTERS + def get_requirements(self): + """Get the requirements as a list of ordered dictionaries.""" + reqs = [] + for req in self.requirements: + reqs.append(req.export_to_ordered_dict()) + return reqs + + def get_references(self): + """Get the references as a list of ordered dictionaries.""" + refs = [] + for ref in 
self.references: + refs.append(ref.export_to_ordered_dict()) + return refs + + def export_metadata_to_ordered_dict(self): + """Export the topic metadata into an ordered dictionary.""" + metadata = collections.OrderedDict() + metadata['name'] = self.name + metadata['type'] = self.type + metadata['title'] = self.title + metadata['summary'] = self.summary + metadata['requirements'] = self.get_requirements() + metadata['docker_image'] = self.docker_image + metadata['maintainers'] = self.maintainers + metadata['references'] = self.get_references() + return metadata + + # SETTERS + def set_default_requirement(self): + """Set default requirement: Galaxy introduction.""" + self.requirements = [] + if self.type == 'use': + self.requirements.append(Requirement()) + + def set_default_reference(self): + """Set default refences: no information.""" + self.references = [] + if self.type == 'use': + self.references.append(Reference()) + + def set_paths(self): + """Set the paths to folder and files.""" + self.dir = os.path.join(self.parent_dir, self.name) + self.img_folder = os.path.join(self.dir, "images") + self.tuto_folder = os.path.join(self.dir, "tutorials") + self.index_fp = os.path.join(self.dir, "index.md") + self.readme_fp = os.path.join(self.dir, "README.md") + self.metadata_fp = os.path.join(self.dir, "metadata.yaml") + self.docker_folder = os.path.join(self.dir, "docker") + self.dockerfile_fp = os.path.join(self.docker_folder, "Dockerfile") + self.slides_folder = os.path.join(self.dir, "slides") + + # TESTS + def exists(self): + """Test if the topic exists.""" + return os.path.isdir(self.dir) + + # OTHER METHODS + def create_topic_structure(self): + """Create the skeleton of a new topic. + + 1. create the folder and its structure + 2. update the index.md to match your topic's name + 3. fill the metadata + 4. 
add a symbolic link to the metadata.yaml from the metadata folder + """ + # create the folder and its structure + os.makedirs(self.dir) + self.img_folder = os.path.join(self.dir, "images") + os.makedirs(self.img_folder) + self.tuto_folder = os.path.join(self.dir, "tutorials") + os.makedirs(self.tuto_folder) + + # create the index.md and add the topic name + self.index_fp = os.path.join(self.dir, "index.md") + with open(self.index_fp, 'w') as index_f: + index_f.write( + templates.render(INDEX_FILE_TEMPLATE, **{'topic': self.name})) + + # create the README file + self.readme_fp = os.path.join(self.dir, "README.md") + with open(self.readme_fp, 'w') as readme_f: + readme_f.write( + templates.render(README_FILE_TEMPLATE, **{'topic': self.title})) + + # create the metadata file + self.metadata_fp = os.path.join(self.dir, "metadata.yaml") + save_to_yaml(self.export_metadata_to_ordered_dict(), self.metadata_fp) + + # create Dockerfile + self.docker_folder = os.path.join(self.dir, "docker") + os.makedirs(self.docker_folder) + self.dockerfile_fp = os.path.join(self.docker_folder, "Dockerfile") + with open(self.dockerfile_fp, 'w') as dockerfile: + dockerfile.write( + templates.render( + DOCKER_FILE_TEMPLATE, + **{'topic_name': self.name, 'topic_title': self.title})) + + # create empty introduction slides + self.slides_folder = os.path.join(self.dir, "slides") + os.makedirs(self.slides_folder) + self.intro_slide_fp = os.path.join(self.slides_folder, "introduction.html") + with open(self.intro_slide_fp, 'w') as intro_slide_f: + intro_slide_f.write( + templates.render( + INTRO_SLIDES_FILE_TEMPLATE, + **{'title': "Introduction to %s" % self.title, 'type': "introduction"})) + + # add a symbolic link to the metadata.yaml + metadata_dir = "metadata" + if not os.path.isdir(metadata_dir): + os.makedirs(metadata_dir) + os.chdir(metadata_dir) + os.symlink(os.path.join("..", self.metadata_fp), "%s.yaml" % self.name) + os.chdir("..") diff --git a/planemo/training/tutorial.py 
b/planemo/training/tutorial.py new file mode 100644 index 000000000..cf319b77c --- /dev/null +++ b/planemo/training/tutorial.py @@ -0,0 +1,606 @@ +"""Module contains code for the Tutorial class, dealing with the creation of a training tutorial.""" + +import collections +import json +import os +import re +import shutil +from pprint import pprint + +import oyaml as yaml +import requests +import six + +from planemo import templates +from planemo.bioblend import galaxy +from planemo.engine import ( + engine_context, + is_galaxy_engine, +) +from planemo.io import info +from planemo.runnable import for_path +from .tool_input import ( + get_empty_input, + get_empty_param, + ToolInput +) +from .utils import ( + load_yaml, + save_to_yaml +) + +TUTO_HAND_ON_TEMPLATE = """--- +layout: tutorial_hands_on + +{{ metadata }} +--- + +{{ body }} +""" + +TUTO_SLIDES_TEMPLATE = """--- +layout: tutorial_slides +logo: "GTN" + +{{ metadata }} +--- + +### How to fill the slide decks? + +Please follow our +[tutorial to learn how to fill the slides]({{ '{{' }} site.baseurl {{ '}}' }}/topics/contributing/tutorials/create-new-tutorial-slides/slides.html) +""" + + +HANDS_ON_TOOL_BOX_TEMPLATE = """ +## Sub-step with **{{tool_name}}** + +> ### {{ '{%' }} icon hands_on {{ '%}' }} Hands-on: Task description +> +> 1. **{{tool_name}}** {{ '{%' }} icon tool {{ '%}' }} with the following parameters:{{inputlist}}{{paramlist}} +> +> ***TODO***: *Check parameter descriptions* +> +> ***TODO***: *Consider adding a comment or tip box* +> +> > ### {{ '{%' }} icon comment {{ '%}' }} Comment +> > +> > A comment about the tool or something else. This box can also be in the main text +> {: .comment} +> +{: .hands_on} + +***TODO***: *Consider adding a question to test the learners understanding of the previous exercise* + +> ### {{ '{%' }} icon question {{ '%}' }} Questions +> +> 1. Question1? +> 2. Question2? +> +> > ### {{ '{%' }} icon solution {{ '%}' }} Solution +> > +> > 1. Answer for question1 +> > 2. 
Answer for question2 +> > +> {: .solution} +> +{: .question} + +""" + +TUTO_HAND_ON_BODY_TEMPLATE = """ +# Introduction +{:.no_toc} + + + +General introduction about the topic and then an introduction of the +tutorial (the questions and the objectives). It is nice also to have a +scheme to sum up the pipeline used during the tutorial. The idea is to +give to trainees insight into the content of the tutorial and the (theoretical +and technical) key concepts they will learn. + +**Please follow our +[tutorial to learn how to fill the Markdown]({{ '{{' }} site.baseurl {{ '}}' }}/topics/contributing/tutorials/\ +create-new-tutorial-content/tutorial.html)** + +> ### Agenda +> +> In this tutorial, we will cover: +> +> 1. TOC +> {:toc} +> +{: .agenda} + +# Title for your first section + +Give some background about what the trainees will be doing in the section. + +Below are a series of hand-on boxes, one for each tool in your workflow file. +Often you may wish to combine several boxes into one or make other adjustments such +as breaking the tutorial into sections, we encourage you to make such changes as you +see fit, this is just a starting point :) + +Anywhere you find the word "***TODO***", there is something that needs to be changed +depending on the specifics of your tutorial. + +have fun! + +## Get data + +> ### {{ '{%' }} icon hands_on {{ '%}' }} Hands-on: Data upload +> +> 1. Create a new history for this tutorial +> 2. Import the files from [Zenodo]({{ zenodo_link }}) or from the shared data library +> +> ``` +> {{ z_file_links }} +> ``` +> ***TODO***: *Add the files by the ones on Zenodo here (if not added)* +> +> ***TODO***: *Remove the useless files (if added)* +> +> {{ '{%' }} include snippets/import_via_link.md {{ '%}' }} +> {{ '{%' }} include snippets/import_from_data_library.md {{ '%}' }} +> +> 3. Rename the datasets +> 4. Check that the datatype +> +> {{ '{%' }} include snippets/change_datatype.md datatype="datatypes" {{ '%}' }} +> +> 5. 
Add to each database a tag corresponding to ... +> +> {{ '{%' }} include snippets/add_tag.md {{ '%}' }} +> +{: .hands_on} + +# Title of the section usually corresponding to a big step in the analysis + +It comes first a description of the step: some background and some theory. +Some image can be added there to support the theory explanation: + +![Alternative text](../../images/image_name "Legend of the image") + +The idea is to keep the theory description before quite simple to focus more on the practical part. + +***TODO***: *Consider adding a detail box to expand the theory* + +> ### {{ '{%' }} icon details {{ '%}' }} More details about the theory +> +> But to describe more details, it is possible to use the detail boxes which are expandable +> +{: .details} + +A big step can have several subsections or sub steps: + +{{ body }} + +## Re-arrange + +To create the template, each step of the workflow had its own subsection. + +***TODO***: *Re-arrange the generated subsections into sections or other subsections. +Consider merging some hands-on boxes to have a meaningful flow of the analyses* + +# Conclusion +{:.no_toc} + +Sum up the tutorial and the key takeaways here. We encourage adding an overview image of the +pipeline used. 
+""" + + +class Tutorial: + """Class to describe a training tutorial.""" + + def __init__(self, training, topic, name="new_tuto", title="The new tutorial", zenodo_link=""): + """Init a tutorial instance.""" + self.training = training + self.topic = topic + self.name = name + self.title = title + self.zenodo_link = zenodo_link + self.zenodo_file_links = [] + self.questions = [] + self.objectives = [] + self.time = "" + self.key_points = [] + self.contributors = [] + self.body = "" + self.init_wf_fp = None + self.init_wf_id = None + self.hands_on = True + self.slides = False + self.datatype_fp = "" + self.set_dir_name() + self.init_data_lib() + self.body = templates.render(HANDS_ON_TOOL_BOX_TEMPLATE, **{ + 'tool_name': "My Tool", + 'inputlist': get_empty_input(), + 'paramlist': get_empty_param() + }) + + def init_from_kwds(self, kwds): + """Init a tutorial instance from a kwds dictionary.""" + self.name = kwds["tutorial_name"] + self.title = kwds["tutorial_title"] + self.zenodo_link = kwds["zenodo_link"] if kwds["zenodo_link"] else '' + self.questions = [ + "Which biological questions are addressed by the tutorial?", + "Which bioinformatics techniques is important to know for this type of data?"] + self.objectives = [ + "The learning objectives are the goals of the tutorial", + "They will be informed by your audience and will communicate to them and to yourself what you should focus on during the course", + "They are single sentence describing what a learner will be able to do once they have done the tutorial", + "You can use the Bloom's Taxonomy to write effective learning objectives"] + self.time = "3H" + self.key_points = [ + "The take-home messages", + "They will appear at the end of the tutorial"] + self.contributors = ["contributor1", "contributor2"] + self.init_wf_fp = kwds['workflow'] + self.init_wf_id = kwds['workflow_id'] + self.hands_on = kwds['hands_on'] + self.slides = kwds['slides'] + self.datatype_fp = kwds['datatypes'] + self.set_dir_name() + 
self.init_data_lib() + + def init_from_existing_tutorial(self, tuto_name): + """Init a tutorial instance from an existing tutorial (data library and tutorial.md).""" + self.name = tuto_name + self.set_dir_name() + + if not self.exists(): + raise Exception("The tutorial %s does not exists. It should be created" % self.name) + + # get the metadata information of the tutorial (from the top of the tutorial.md) + with open(self.tuto_fp, "r") as tuto_f: + tuto_content = tuto_f.read() + regex = '^---\n(?P[\s\S]*)\n---(?P[\s\S]*)' + tuto_split_regex = re.search(regex, tuto_content) + if not tuto_split_regex: + raise Exception("No metadata found at the top of the tutorial") + metadata = yaml.load(tuto_split_regex.group("metadata")) + self.title = metadata["title"] + self.zenodo_link = metadata["zenodo_link"] + self.questions = metadata["questions"] + self.objectives = metadata["objectives"] + self.time_estimation = metadata["time_estimation"] + self.key_points = metadata["key_points"] + self.contributors = metadata["contributors"] + + # the the tutorial content + self.body = tuto_split_regex.group("body") + + # get the data library + self.init_data_lib() + + def init_data_lib(self): + """Init the data library dictionary.""" + if os.path.exists(self.data_lib_fp): + self.data_lib = load_yaml(self.data_lib_fp) + else: + self.data_lib = collections.OrderedDict() + # set default information + self.data_lib.setdefault('destination', collections.OrderedDict()) + self.data_lib['destination']['type'] = 'library' + self.data_lib['destination']['name'] = 'GTN - Material' + self.data_lib['destination']['description'] = 'Galaxy Training Network Material' + self.data_lib['destination']['synopsis'] = 'Galaxy Training Network Material. 
See https://training.galaxyproject.org' + self.data_lib.setdefault('items', []) + self.data_lib.pop('libraries', None) + # get topic or create new one + topic = collections.OrderedDict() + for item in self.data_lib['items']: + if item['name'] == self.topic.title: + topic = item + if not topic: + self.data_lib['items'].append(topic) + topic['name'] = self.topic.title + topic['description'] = self.topic.summary + topic['items'] = [] + # get tutorial or create new one + self.tuto_data_lib = collections.OrderedDict() + for item in topic['items']: + if item['name'] == self.title: + self.tuto_data_lib = item + if not self.tuto_data_lib: + topic['items'].append(self.tuto_data_lib) + self.tuto_data_lib['name'] = self.title + self.tuto_data_lib['items'] = [] + + # GETTERS + def get_tuto_metata(self): + """Return the string corresponding to the tutorial metadata.""" + metadata = collections.OrderedDict() + metadata['title'] = self.title + metadata['zenodo_link'] = self.zenodo_link + metadata['questions'] = self.questions + metadata['objectives'] = self.objectives + metadata['time_estimation'] = self.time + metadata['key_points'] = self.key_points + metadata['contributors'] = self.contributors + return yaml.safe_dump( + metadata, + indent=2, + default_flow_style=False, + default_style='', + explicit_start=False) + + # SETTERS + def set_dir_name(self): + """Set the path to dir and files of a tutorial.""" + self.dir = os.path.join(self.topic.dir, "tutorials", self.name) + self.tuto_fp = os.path.join(self.dir, "tutorial.md") + self.slide_fp = os.path.join(self.dir, 'slides.html') + self.data_lib_fp = os.path.join(self.dir, "data_library.yaml") + self.wf_dir = os.path.join(self.dir, "workflows") + self.wf_fp = os.path.join(self.wf_dir, "main_workflow.ga") + self.tour_dir = os.path.join(self.dir, "tours") + # remove empty workflow file if there + empty_wf_filepath = os.path.join(self.wf_dir, "empty_workflow.ga") + if os.path.exists(empty_wf_filepath): + 
os.remove(empty_wf_filepath) + + # TEST METHODS + def exists(self): + """Test if the tutorial exists.""" + return os.path.isdir(self.dir) + + def has_workflow(self): + """Test if a workflow is provided for the tutorial.""" + return self.init_wf_fp or self.init_wf_id + + # EXPORT METHODS + def export_workflow_file(self): + """Copy or extract workflow file and add it to the tutorial directory.""" + if not os.path.exists(self.wf_dir): + os.makedirs(self.wf_dir) + if self.init_wf_fp: + shutil.copy(self.init_wf_fp, self.wf_fp) + elif self.init_wf_id: + gi = galaxy.GalaxyInstance(self.training.galaxy_url, key=self.training.galaxy_api_key) + gi.workflows.export_workflow_to_local_path( + self.init_wf_id, + self.wf_fp, + use_default_filename=False) + + # OTHER METHODS + def get_files_from_zenodo(self): + """Extract a list of URLs and dictionary describing the files from the JSON output of the Zenodo API.""" + z_record, req_res = get_zenodo_record(self.zenodo_link) + + self.zenodo_file_links = [] + if 'files' not in req_res: + raise ValueError("No files in the Zenodo record") + + files = [] + for f in req_res['files']: + file_dict = {'url': '', 'src': 'url', 'ext': '', 'info': self.zenodo_link} + if 'type' in f: + file_dict['ext'] = get_galaxy_datatype(f['type'], self.datatype_fp) + if 'links' not in f and 'self' not in f['links']: + raise ValueError("No link for file %s" % f) + file_dict['url'] = f['links']['self'] + self.zenodo_file_links.append(f['links']['self']) + files.append(file_dict) + + return (files, z_record) + + def prepare_data_library_from_zenodo(self): + """Get the list of URLs of the files on Zenodo, fill the data library, save it into the file.""" + self.zenodo_file_links = [] + if self.zenodo_link != '': + files, z_record = self.get_files_from_zenodo() + if z_record: + # get current data library and/or previous data library for the tutorial + # remove the latest tag of any existing library + # remove the any other existing library + current_data_lib = 
collections.OrderedDict() + previous_data_lib = collections.OrderedDict() + for item in self.tuto_data_lib['items']: + if item['name'] == "DOI: 10.5281/zenodo.%s" % z_record: + current_data_lib = item + elif item['description'] == 'latest': + previous_data_lib = item + previous_data_lib['description'] = '' + if not current_data_lib: + current_data_lib['name'] = "DOI: 10.5281/zenodo.%s" % z_record + current_data_lib['description'] = 'latest' + current_data_lib['items'] = [] + current_data_lib['items'] = files + + self.tuto_data_lib['items'] = [current_data_lib] + if previous_data_lib: + self.tuto_data_lib['items'].append(previous_data_lib) + save_to_yaml(self.data_lib, self.data_lib_fp) + + def write_hands_on_tutorial(self): + """Write the content of the hands-on tutorial in the corresponding file.""" + # add the zenodo links + self.body = templates.render(TUTO_HAND_ON_BODY_TEMPLATE, **{ + "z_file_links": "\n> ".join(self.zenodo_file_links), + "body": self.body + }) + # write in the tutorial file with the metadata on the top + metadata = self.get_tuto_metata() + with open(self.tuto_fp, 'w') as md: + md.write(templates.render(TUTO_HAND_ON_TEMPLATE, **{ + "metadata": metadata, + "body": self.body + })) + + def create_hands_on_tutorial(self, ctx): + """Create tutorial structure from the workflow file (if it is provided).""" + # load workflow and get hands-on body from the workflow + if self.init_wf_id: + if not self.training.galaxy_url: + raise ValueError("No Galaxy URL given") + if not self.training.galaxy_api_key: + raise ValueError("No API key to access the given Galaxy instance") + self.body = get_hands_on_boxes_from_running_galaxy(self.init_wf_id, self.training.galaxy_url, self.training.galaxy_api_key) + elif self.init_wf_fp: + self.body = get_hands_on_boxes_from_local_galaxy(self.training.kwds, self.init_wf_fp, ctx) + # write tutorial body + self.write_hands_on_tutorial() + + def create_tutorial(self, ctx): + """Create the skeleton of a new tutorial.""" + # 
create tuto folder and empty files + os.makedirs(self.dir) + os.makedirs(self.tour_dir) + os.makedirs(self.wf_dir) + + # extract the data library from Zenodo and the links for the tutorial + if self.zenodo_link != '': + info("Create the data library from Zenodo") + self.prepare_data_library_from_zenodo() + + # create tutorial skeleton from workflow and copy workflow file + if self.hands_on: + info("Create tutorial skeleton from workflow (if it is provided)") + self.create_hands_on_tutorial(ctx) + self.export_workflow_file() + + # create slide skeleton + if self.slides: + with open(self.slide_fp, 'w') as slide_f: + slide_f.write( + templates.render(TUTO_SLIDES_TEMPLATE, **{"metadata": self.get_tuto_metata()})) + + +def get_galaxy_datatype(z_ext, datatype_fp): + """Get the Galaxy datatype corresponding to a Zenodo file type.""" + g_datatype = '' + datatypes = load_yaml(datatype_fp) + if z_ext in datatypes: + g_datatype = datatypes[z_ext] + if g_datatype == '': + g_datatype = '# Please add a Galaxy datatype or update the shared/datatypes.yaml file' + info("Get Galaxy datatypes: %s --> %s" % (z_ext, g_datatype)) + return g_datatype + + +def get_zenodo_record(zenodo_link): + """Get the content of a Zenodo record.""" + # get the record in the Zenodo link + if 'doi' in zenodo_link: + z_record = zenodo_link.split('.')[-1] + else: + z_record = zenodo_link.split('/')[-1] + # get JSON corresponding to the record from Zenodo API + req = "https://zenodo.org/api/records/%s" % (z_record) + r = requests.get(req) + if r: + req_res = r.json() + else: + info("The Zenodo link (%s) seems invalid" % (zenodo_link)) + req_res = {'files': []} + z_record = None + return(z_record, req_res) + + +def get_wf_inputs(step_inp): + """Get the inputs from a workflow step and format them into a hierarchical dictionary.""" + inputs = {} + for inp_n, inp in step_inp.items(): + if '|' in inp_n: + repeat_regex = '(?P[^\|]*)_(?P\d+)\|(?P.+).+' + repeat_search = re.search(repeat_regex, inp_n) + hier_regex 
= '(?P[^\|]*)\|(?P.+)' + hier_regex = re.search(hier_regex, inp_n) + if repeat_search and repeat_search.start(0) <= hier_regex.start(0): + inputs.setdefault(repeat_search.group('prefix'), {}) + inputs[repeat_search.group('prefix')].setdefault( + repeat_search.group('nb'), + get_wf_inputs({hier_regex.group('suffix'): inp})) + else: + inputs.setdefault(hier_regex.group('prefix'), {}) + inputs[hier_regex.group('prefix')].update( + get_wf_inputs({hier_regex.group('suffix'): inp})) + else: + inputs.setdefault(inp_n, inp) + return inputs + + +def get_wf_param_values(init_params, inp_connections): + """Get the param values from a workflow step and format them into a hierarchical dictionary.""" + if not isinstance(init_params, six.string_types) or '": ' not in init_params: + form_params = init_params + else: + form_params = json.loads(init_params) + if isinstance(form_params, dict): + if '__class__' in form_params and form_params['__class__'] == 'RuntimeValue': + form_params = inp_connections + else: + for p in form_params: + inp = inp_connections[p] if p in inp_connections else {} + form_params[p] = get_wf_param_values(form_params[p], inp) + elif isinstance(form_params, list): + json_params = form_params + form_params = [] + for i, p in enumerate(json_params): + inp = inp_connections[str(i)] if str(i) in inp_connections else {} + form_params.append(get_wf_param_values(p, inp)) + elif isinstance(form_params, six.string_types) and '"' in form_params: + form_params = form_params.replace('"', '') + return form_params + + +def format_wf_steps(wf, gi): + """Get a string with the hands-on boxes describing the different steps of the worklow.""" + body = '' + steps = wf['steps'] + + for s in range(len(steps)): + print('format_wf_steps') + wf_step = steps[str(s)] + pprint(wf_step) + # get params in workflow + wf_param_values = {} + if wf_step['tool_state'] and wf_step['input_connections']: + wf_param_values = get_wf_param_values(wf_step['tool_state'], 
get_wf_inputs(wf_step['input_connections'])) + if not wf_param_values: + continue + # get tool description + try: + tool_desc = gi.tools.show_tool(wf_step['tool_id'], io_details=True) + except Exception: + tool_desc = {'inputs': []} + # get formatted param description + paramlist = '' + pprint(tool_desc) + pprint(wf_param_values) + print(type(wf_param_values)) + for inp in tool_desc["inputs"]: + pprint(inp) + tool_inp = ToolInput(inp, wf_param_values, steps, 1, should_be_there=True) + paramlist += tool_inp.get_formatted_desc() + # format the hands-on box + body += templates.render(HANDS_ON_TOOL_BOX_TEMPLATE, **{ + "tool_name": wf_step['name'], + "paramlist": paramlist}) + return body + + +def get_hands_on_boxes_from_local_galaxy(kwds, wf_filepath, ctx): + """Server local Galaxy and get the workflow dictionary.""" + assert is_galaxy_engine(**kwds) + runnable = for_path(wf_filepath) + tuto_body = '' + with engine_context(ctx, **kwds) as galaxy_engine: + with galaxy_engine.ensure_runnables_served([runnable]) as config: + workflow_id = config.workflow_id(wf_filepath) + wf = config.gi.workflows.export_workflow_dict(workflow_id) + tuto_body = format_wf_steps(wf, config.gi) + return tuto_body + + +def get_hands_on_boxes_from_running_galaxy(wf_id, galaxy_url, galaxy_api_key): + """Get the workflow dictionary from a running Galaxy instance with the workflow installed on it.""" + gi = galaxy.GalaxyInstance(galaxy_url, key=galaxy_api_key) + wf = gi.workflows.export_workflow_dict(wf_id) + tuto_body = format_wf_steps(wf, gi) + return tuto_body diff --git a/planemo/training/utils.py b/planemo/training/utils.py new file mode 100644 index 000000000..bfcc69bc4 --- /dev/null +++ b/planemo/training/utils.py @@ -0,0 +1,76 @@ +"""Module contains code for the Requirement, Reference and some general functions for training.""" + +import collections + +import oyaml as yaml + + +class Requirement: + """Class to describe a training requirement.""" + + def __init__(self, title="", 
req_type="internal", link="/introduction/"): + """Init a Requirement instance.""" + self.title = title + self.type = req_type + self.link = link + + def init_from_dict(self, dict): + """Init from a dictionary generated by export_to_ordered_dict.""" + self.title = dict['title'] + self.type = dict['type'] + self.link = dict['link'] + + def export_to_ordered_dict(self): + """Export the requirement into an ordered dictionary.""" + req = collections.OrderedDict() + req['title'] = self.title + req['type'] = self.type + req['link'] = self.link + return req + + +class Reference: + """Class to describe a training reference.""" + + def __init__(self, authors="authors et al", title="the title", link="link", summary="Why this reference is useful"): + """Init a Reference instance.""" + self.authors = authors + self.title = title + self.link = link + self.summary = summary + + def init_from_dict(self, dict): + """Init from a dictionary generated by export_to_ordered_dict.""" + self.authors = dict['authors'] + self.title = dict['title'] + self.link = dict['link'] + self.summary = dict['summary'] + + def export_to_ordered_dict(self): + """Export the reference into an ordered dictionary.""" + ref = collections.OrderedDict() + ref['authors'] = self.authors + ref['title'] = self.title + ref['link'] = self.link + ref['summary'] = self.summary + return ref + + +def load_yaml(filepath): + """Load the content of a YAML file to a dictionary.""" + with open(filepath, "r") as m_file: + content = yaml.load(m_file) + return content + + +def save_to_yaml(content, filepath): + """Save a dictionary to a YAML file.""" + with open(filepath, 'w') as stream: + yaml.safe_dump(content, + stream, + indent=2, + default_flow_style=False, + default_style='', + explicit_start=True, + encoding='utf-8', + allow_unicode=True) diff --git a/planemo/xml/xsd/tool/galaxy.xsd b/planemo/xml/xsd/tool/galaxy.xsd index 19a52917f..614f5497e 100644 --- a/planemo/xml/xsd/tool/galaxy.xsd +++ 
b/planemo/xml/xsd/tool/galaxy.xsd @@ -248,12 +248,12 @@ complete descriptions of the runtime of a tool. `` tag set. Third party programs or modules that the tool depends upon are included in this tag set. When a tool runs, Galaxy attempts to *resolve* these requirements (also called dependencies). ``requirement``s are meant to be abstract and resolvable by -multiple different systems (e.g. [conda](http://conda.pydata.org/docs/), the +multiple different systems (e.g. [conda](https://conda.io/), the [Galaxy Tool Shed dependency management system](https://galaxyproject.org/toolshed/tool-features/#Automatic_third-party_tool_dependency_installation_and_compilation_with_installed_repositories), or [environment modules](http://modules.sourceforge.net/)). @@ -297,12 +297,12 @@ resolver. - This value defines the which type of the 3rd party module required by this tool. + This value defines the type of the 3rd party module required by this tool. - For package type requirements this value defines a specific version of the tool dependency. + For requirements of type ``package`` this value defines a specific version of the tool dependency. @@ -311,7 +311,6 @@ resolver. - This value describes the type of container that the tool may be executed in and currently must be ``docker``. + This value describes the type of container that the tool may be executed in and currently must be ``docker``. @@ -371,7 +369,7 @@ Read more about configuring Galaxy to run Docker jobs - **Deprecated** Map a hook to a function defined in the code file. + *Deprecated*. Map a hook to a function defined in the code file. @@ -572,9 +570,9 @@ The content of ``stdout`` and ``stderr`` are strings containing the output of th This directive is used to specify some rarely modified options. - + - Deprecated, likely unused attribute. + *Deprecated*. Unused attribute. @@ -613,8 +611,8 @@ The content of ``stdout`` and ``stderr`` are strings containing the output of th tag sets. 
Any number of tests can be included, -and each test is wrapped within separate tag sets. Functional tests are +Container tag set to specify tests via the ```` tag sets. Any number of tests can be included, +and each test is wrapped within separate ```` tag sets. Functional tests are executed via [Planemo](https://planemo.readthedocs.io/) or the [run_tests.sh](https://github.com/galaxyproject/galaxy/blob/dev/run_tests.sh) shell script distributed with Galaxy. @@ -1340,6 +1338,11 @@ The functional test tool provides a demonstration of using this tag. ```xml + + + + + @@ -1358,6 +1361,7 @@ provides a demonstration of using this tag. ``` +Note that this tool uses ``assign_primary_output="true"`` for ````. Hence, the content of the first discovered dataset (which is the first in the alphabetically sorted list of discovered designations) is checked directly in the ```` tag of the test. ]]> @@ -1750,7 +1754,7 @@ managed by the Galaxy admins (for instance via [data managers](https://galaxyproject.org/admin/tools/data-managers/) ) and history files. A good example tool that demonstrates this is -the [Bowtie 2](https://github.com/galaxyproject/tools-devteam/blob/master/tools/bowtie2/bowtie2_wrapper.xml) wrapper. +the [Bowtie 2](https://github.com/galaxyproject/tools-iuc/blob/master/tools/bowtie2/bowtie2_wrapper.xml) wrapper. ```xml @@ -1870,7 +1874,7 @@ This part is contained in the ```` tag set. This Cheetah code can be used in the ```` tag set or the ```` tag set. -```xml +``` #for $i, $s in enumerate($series) rank_of_series=$i input_path='${s.input}' @@ -1901,7 +1905,7 @@ This is an example test case with multiple repeat elements for the example above See the documentation on the [repeat test directive](#tool-tests-test-repeat). 
-An older way to specify repeats in a test is by instances that are created by referring to names with a special format: "_|" +An older way to specify repeats in a test is by instances that are created by referring to names with a special format: ``_|`` ```xml @@ -1932,22 +1936,22 @@ demonstrates both testing strategies. - The title of the repeat section, which will be displayed on the tool form. + The title of the repeat section, which will be displayed on the tool form. - The minimum number of repeat units. + The minimum number of repeat units. - The maximum number of repeat units. + The maximum number of repeat units. - The default number of repeat units. + The default number of repeat units. @@ -2077,7 +2081,7 @@ These parameters represent whole number and real numbers, respectively. ##### Example -``` +```xml ``` @@ -2093,6 +2097,11 @@ $attribute_list:checked,truevalue,falsevalue:5 A dataset from the current history. Multiple types might be used for the param form. +#### ``group_tag`` + +$attribute_list:multiple,date_ref:5 + + ##### Examples The following will find all "coordinate interval files" contained within the @@ -2298,7 +2307,7 @@ parameter. - Deprecated way to specify default value for column parameters (use ``value`` instead). + *Deprecated*. Specify default value for column parameters (use ``value`` instead). @@ -2350,10 +2359,10 @@ as a comma separated list. @@ -2370,7 +2379,7 @@ but not always the tool's input dataset). - Used only if the ``type`` attribute + *Deprecated*. Used only if the ``type`` attribute value is ``data_column``, this is deprecated and the inverse of ``optional``. Set to ``false`` to not force user to select an option in the list. @@ -2503,6 +2512,7 @@ allow access to Python code to generate options for a select list. See + @@ -2525,7 +2535,9 @@ to the interpreter specified in the corresponding attribute (if any). The following uses a compiled executable ([bedtools](https://bedtools.readthedocs.io/en/latest/)). 
```xml -bed12ToBed6 -i '$input' > '$output' + '$output' +]]]]> ``` A few things to note about even this simple example: @@ -2547,26 +2559,26 @@ tool. ```xml "${output}" +-i '${inputA}' +#if $names.names_select == 'yes': + -files + #for $bed in $names.beds: + '${bed.input}' + #end for + -names + #for $bed in $names.beds: + '${bed.inputName}' + #end for +#else: + #set files = '" "'.join([str($file) for $file in $names.beds]) + -files '${files}' + #set names = '" "'.join([str($name.display_name) for $name in $names.beds]) + -names '${names}' +#end if +$strand +$counts +$both +> '${output}' ]]]]> ``` @@ -2663,15 +2675,13 @@ Name | Description See the [Planemo docs](https://planemo.readthedocs.io/en/latest/writing_advanced.html#cluster-usage) on the topic of ``GALAXY_SLOTS`` for more information and examples. -### Attributes +### Error detection -#### ``detect_errors`` +The ``detect_errors`` attribute of ``command``, if present, can be one of: -If present on the ``command`` tag, this attribute can be one of: - -* ``default`` no-op fallback to ``stdio`` tags and erroring on standard error output (for legacy tools). -* ``exit_code`` error if tool exit code is not 0. (The @jmchilton recommendation). -* ``aggressive`` error if tool exit code is not 0 or ``Exception:``, ``Error:``, or +* ``default``: no-op fallback to ``stdio`` tags and erroring on standard error output (for legacy tools). +* ``exit_code``: error if tool exit code is not 0. (The @jmchilton recommendation). +* ``aggressive``: error if tool exit code is not 0 or ``Exception:``, ``Error:``, or various messages related to being out of memory appear in the standard error or output. (The @bgruening recommendation). @@ -2681,46 +2691,33 @@ produces any standard error output). See [pull request 117](https://github.com/galaxyproject/galaxy/pull/117) for more implementation information and discussion on the ``detect_errors`` attribute. 
- -#### ``strict`` - -This boolean forces the ``#set -e`` directive on in shell scripts - so that in a -multi-part command if any part fails the job exits with a non-zero exit code. -This is enabled by default for tools with ``profile>=16.04`` and disabled on -legacy tools. - -#### ``interpreter`` - -Older tools may define an ``intepreter`` attribute on the command, but this is -deprecated and using the ``$__tool_directory__`` variable is superior. - ]]> - + One of ``default``, ``exit_code``, ``aggressive``. See "Error detection" above for details. - Only used if ``detect_errors="exit_code", tells Galaxy the specified exit code indicates an out of memory error. Galaxy instances may be configured to retry such jobs on resources with more memory. + Only used if ``detect_errors="exit_code"``, tells Galaxy the specified exit code indicates an out of memory error. Galaxy instances may be configured to retry such jobs on resources with more memory. - When running a job for this tool, do not isolate its $HOME directory within the job's directory - use either the shared_home_dir setting in Galaxy or the default $HOME specified in the job's default environment. + When running a job for this tool, do not isolate its ``$HOME`` directory within the job's directory - use either the ``shared_home_dir`` setting in Galaxy or the default ``$HOME`` specified in the job's default environment. - This attribute defines the programming language in which the tool's executable file is written. Any language can be used (tools can be written in Python, C, Perl, Java, etc.). The executable file must be in the same directory of the XML file. If instead this attribute is not specified, the tag content should be a Bash command calling executable(s) available in the $PATH. + '$__tool_directory__/'`` in the tag content. 
If this attribute is not specified, the tag should contain a Bash command calling executable(s) available in the ``$PATH``, as modified after loading the requirements.]]> - + This boolean forces the ``#set -e`` directive on in shell scripts - so that in a multi-part command if any part fails the job exits with a non-zero exit code. This is enabled by default for tools with ``profile>=16.04`` and disabled on legacy tools. @@ -3061,7 +3058,7 @@ target file. See the [annotation_profiler](https://github.com/galaxyproject/tools-devteam/blob/master/tools/annotation_profiler/annotation_profiler.xml) tool for an example of how to use this tag set. This tag set is contained within -the tag set - it applies a validator to the containing parameter. +the ```` tag set - it applies a validator to the containing parameter. ### Examples @@ -3130,7 +3127,7 @@ Valid values include: ``expression``, ``regex``, ``in_range``, ``length``, -The error message displayed on the tool form if validation fails. +The error message displayed on the tool form if validation fails. @@ -3232,7 +3229,7 @@ contained within the ```` tag set - it contains a set of ```` and ### Character presets The following presets can be used when specifying the valid characters: the -[constants from the ``string`` Python3 module](https://docs.python.org/3/library/string.html#string-constants), +[constants](https://docs.python.org/3/library/string.html#string-constants) from the ``string`` Python3 module, plus ``default`` (equal to ``string.ascii_letters + string.digits + " -=_.()/+*^,:?!"``) and ``none`` (empty set). The ``string.letters``, ``string.lowercase`` and ``string.uppercase`` Python2 @@ -3247,10 +3244,12 @@ of the default ``X``, so invalid characters are effectively dropped instead of replaced with ``X``) and indicates that the only valid characters for this input are ASCII letters, digits, and ``_``. -``` +```xml - + + + ``` @@ -3258,13 +3257,13 @@ are ASCII letters, digits, and ``_``. 
This example allows many more valid characters and specifies that ``&`` will just be dropped from the input. -``` +```xml - + - + ``` @@ -3317,7 +3316,8 @@ valid input for the mapping to occur. - Add the characters contained in the specified character preset (as defined above) to the list of valid characters. + Add the characters contained in the specified character preset (as defined above) to the list of valid characters. The default +is the ``none`` preset. @@ -3330,12 +3330,12 @@ valid input for the mapping to occur. This directive is used to remove individual characters or preset lists of characters. -Character must not be allowed as a valid input for the mapping to occur. -Preset lists include default and none as well as those available from string.* (e.g. ``string.printable``). +Character must not be allowed as a valid input for the mapping to occur. - Remove the characters contained in the specified character preset (as defined above) from the list of valid characters. + Remove the characters contained in the specified character preset (as defined above) from the list of valid characters. The default +is the ``none`` preset. @@ -3352,7 +3352,7 @@ Preset lists include default and none as well as those available from string.* ( - tag set. Used to specify a mapping of disallowed character to replacement string. Contains and tags.]]> + `` tag set. Used to specify a mapping of disallowed character to replacement string. Contains ```` and ```` tags.]]> @@ -3604,24 +3604,24 @@ the ```` documentation. @@ -3648,7 +3648,7 @@ This tag set is contained within the ```` tag set, and it defines the output data description for the files resulting from the tool's execution. The value of the attribute ``label`` can be acquired from input parameters or metadata in the same way that the command line parameters are (discussed in the - tag set section above). +```` tag set section above). 
### Examples @@ -3762,7 +3762,7 @@ The valid values for format can be found in Sets the source of element identifier to the specified input. -This only applies to collections that are mapped over a non-collection input and that have equivalent structures. +This only applies to collections that are mapped over a non-collection input and that have equivalent structures. If this references input elements in conditionals, this value should be qualified (e.g. ``cond|input`` instead of ``input`` if ``input`` is in a conditional with ``name="cond"``). @@ -3818,7 +3818,7 @@ metadata in the same way that the command line parameters are (discussed in the [command](#tool-command) directive). Creating collections in tools is covered in-depth in -[planemo's documentation](https://planemo.readthedocs.io/en/latest/writing_advanced.html#creating-collections). +[Planemo's documentation](https://planemo.readthedocs.io/en/latest/writing_advanced.html#creating-collections). ]]> @@ -3867,7 +3867,9 @@ derive collection's type (e.g. ``collection_type``) from. This is the name of input collection or dataset to derive "structure" of the output from (output element count and identifiers). For instance, if the referenced input has three ordered items with -identifiers ``sample1``, ``sample2``, and ``sample3`` +identifiers ``sample1``, ``sample2``, and ``sample3``. If this references input +elements in conditionals, this value should be qualified (e.g. ``cond|input`` instead +of ``input`` if ``input`` is in a conditional with ``name="cond"``). @@ -3900,12 +3902,12 @@ conditionals are accessed using a hash named after the conditional. - + - + @@ -4046,7 +4048,7 @@ The ```` in the Bowtie 2 wrapper is used in lieu of the deprecated ```` tag to set the ``dbkey`` of the output dataset. 
In [bowtie2_wrapper.xml](https://github.com/galaxyproject/tools-devteam/blob/master/tools/bowtie2/bowtie2_wrapper.xml) (see below), according to the first action block, if the -```reference_genome.source`` is ``indexed`` (not ``history``), then it will assign +``reference_genome.source`` is ``indexed`` (not ``history``), then it will assign the ``dbkey`` of the output file to be the same as that of the reference file. It does this by looking at through the data table and finding the entry that has the value that's been selected in the index dropdown box as column 1 of the loc file @@ -4163,7 +4165,7 @@ In addition to demonstrating this lower-level direct access of .loc files, it demonstrates an unconditional action. The second block would not be needed for most cases - it was required in this tool to handle the specific case of a small reference file used for functional testing. It says that if the dbkey has been -set to ``equCab2chrM`` (which is what the ````` tag does), then it should be changed to ``equCab2`` (which is the `` + + + Set to 'paths' to include dataset paths in the resulting file. + + @@ -4630,7 +4640,7 @@ Examples are included in the test tools directory including: - '$__tool_directory__/'``.]]> + '$__tool_directory__/'`` in the tag content. If this attribute is not specified, the tag should contain a Bash command calling executable(s) available in the ``$PATH``, as modified after loading the requirements.]]> @@ -4809,7 +4819,7 @@ The target value (e.g. for setting data format: the list of supported data forma Legacy tools (ones with ``profile`` unspecified or a ``profile`` of less than 16.04) will default to checking stderr for errors as described above. Newer tools will instead treat an exit code other than 0 as an error. 
The -``detect_error`` on ``command`` can swap between these behaviors but the +``detect_errors`` on ``command`` can swap between these behaviors but the ``stdio`` directive allows more options in defining error conditions (though these aren't always intuitive). @@ -4846,33 +4856,34 @@ checked.]]> tag defines a range of exit codes, and each range can be associated with a description of the error (e.g., "Out of Memory", "Invalid Sequence File") and an error level. The description just describes the condition and can be anything. The error level is either a warning or a fatal error. A warning means that stderr will be updated with the error's description. A fatal error means that the tool's execution will be marked as having an error and the workflow will stop. Note that, if the error level is not supplied, then a fatal error is assumed to have occurred. +Tools may use exit codes to indicate specific execution errors. Many programs use 0 to indicate success and non-zero exit codes to indicate errors. Galaxy allows each tool to specify exit codes that indicate errors. Each ```` tag defines a range of exit codes, and each range can be associated with a description of the error (e.g., "Out of Memory", "Invalid Sequence File") and an error level. The description just describes the condition and can be anything. The error level is either log, warning, fatal error, or fatal_oom. A warning means that stderr will be updated with the error's description. A fatal error means that the tool's execution will be marked as having an error and the workflow will stop. A fatal_oom indicates an out of memory condition and the job might be resubmitted if Galaxy is configured appropriately. Note that, if the error level is not supplied, then a fatal error is assumed to have occurred. -The exit code's range can be any consecutive group of integers. More advanced ranges, such as noncontiguous ranges, are currently not supported. 
Ranges can be specified in the form "m:n", where m is the start integer and n is the end integer. If ":n" is specified, then the exit code will be compared against all integers less than or equal to n. If "m:" is used, then the exit code will be compared against all integers greater than or equal to m. If the exit code matches, then the error level is applied and the error's description is added to stderr. If a tool's exit code does not match any of the supplied tags' ranges, then no errors are applied to the tool's execution. +The exit code's range can be any consecutive group of integers. More advanced ranges, such as noncontiguous ranges, are currently not supported. Ranges can be specified in the form "m:n", where m is the start integer and n is the end integer. If ":n" is specified, then the exit code will be compared against all integers less than or equal to n. If "m:" is used, then the exit code will be compared against all integers greater than or equal to m. If the exit code matches, then the error level is applied and the error's description is added to stderr. If a tool's exit code does not match any of the supplied ```` tags' ranges, then no errors are applied to the tool's execution. Note that most Unix and Linux variants only support positive integers 0 to 255 for exit codes. If an exit code falls out of the range 0 to 255, the usual convention is to only use the lower 8 bits for the exit code. The only known exception is if a job is broken into subtasks using the tasks runner and one of those tasks is stopped with a POSIX signal. (Note that signals should be used as a last resort for terminating processes.) In those cases, the task will receive -1 times the signal number. For example, suppose that a job uses the tasks runner and 8 tasks are created for the job. If one of the tasks hangs, then a sysadmin may choose to send the "kill" signal, SIGKILL, to the process. In that case, the task (and its job) will exit with an exit code of -9. 
More on POSIX signals can be found at https://en.wikipedia.org/wiki/Unix_signal as well as man pages on "signal". -The tag's supported attributes are as follows: +The ```` tag's supported attributes are as follows: * ``range``: This indicates the range of exit codes to check. The range can be one of the following: * ``n``: the exit code will only be compared to n; * ``[m:n]``: the exit code must be greater than or equal to m and less than or equal to n; * ``[m:]``: the exit code must be greater than or equal to m; * ``[:n]``: the exit code must be less than or equal to n. -* ``level``: This indicates the error level of the exit code. The level can have one of two values: - * ``warning``: If an exit code falls in the given range, then a description of the error will be added to the beginning of stderr. A warning-level error will not cause the tool to fail. - * ``fatal``: If an exit code falls in the given range, then a description of the error will be added to the beginning of stderr. A fatal-level error will cause the tool to fail. If no level is specified, then the fatal error level will be assumed to have occurred. +* ``level``: This indicates the error level of the exit code. If no level is specified, then the fatal error level will be assumed to have occurred. The level can have one of following values: + * ``log`` and ``warning``: If an exit code falls in the given range, then a description of the error will be added to the beginning of the source, prepended with either 'Log:' or 'Warning:'. A log-level/warning-level error will not cause the tool to fail. + * ``fatal``: If an exit code falls in the given range, then a description of the error will be added to the beginning of stderr. A fatal-level error will cause the tool to fail. + * ``fatal_oom``: If an exit code falls in the given range, then a description of the error will be added to the beginning of stderr. 
Depending on the job configuration, a fatal_oom-level error will cause the tool to be resubmitted or fail. * ``description``: This is an optional description of the error that corresponds to the exit code. -The following is an example of the tag: +The following is an example of the ```` tag: ```xml - + - + ``` @@ -4926,10 +4937,11 @@ A regular expression includes the following attributes: * ``stdout``: the regular expression will be applied to stdout; * ``stderr``: the regular expression will be applied to stderr; * ``both``: the regular expression will be applied to both stderr and stdout (which is the default case). -* ``match``: This is the regular expression that will be used to match against stdout and/or stderr. If the tag does not contain the match attribute, then the tag will be ignored. The regular expression can be any valid Python regular expression. All regular expressions are performed case insensitively. For example, if match contains the regular expression "actg", then the regular expression will match against "actg", "ACTG", "AcTg", and so on. Also note that, if double quotes (") are to be used in the match attribute, then the value " can be used in place of double quotes. Likewise, if single quotes (') are to be used in the match attribute, then the value ' can be used if necessary. -* ``level``: This works very similarly to the tag, except that, when a regular expression matches against its source, the description is added to the beginning of the source. For example, if stdout matches on a regular expression, then the regular expression's description is added to the beginning of stdout (instead of stderr). The level can be log, warning or fatal as described below. +* ``match``: This is the regular expression that will be used to match against stdout and/or stderr. If the ```` tag does not contain the match attribute, then the ```` tag will be ignored. The regular expression can be any valid Python regular expression. 
All regular expressions are performed case insensitively. For example, if match contains the regular expression "actg", then the regular expression will match against "actg", "ACTG", "AcTg", and so on. Also note that, if double quotes (") are to be used in the match attribute, then the value " can be used in place of double quotes. Likewise, if single quotes (') are to be used in the match attribute, then the value ' can be used if necessary. +* ``level``: This works very similarly to the ```` tag, except that, when a regular expression matches against its source, the description is added to the beginning of the source. For example, if stdout matches on a regular expression, then the regular expression's description is added to the beginning of stdout (instead of stderr). If no level is specified, then the fatal error level will be assumed to have occurred. The level can have one of following values: * ``log`` and ``warning``: If the regular expression matches against its source input (i.e., stdout and/or stderr), then a description of the error will be added to the beginning of the source, prepended with either 'Log:' or 'Warning:'. A log-level/warning-level error will not cause the tool to fail. - * ``fatal``: If the regular expression matches against its source input, then a description of the error will be added to the beginning of the source. A fatal-level error will cause the tool to fail. If no level is specified, then the fatal error level will be assumed to have occurred. + * ``fatal``: If the regular expression matches against its source input, then a description of the error will be added to the beginning of the source. A fatal-level error will cause the tool to fail. + * ``fatal_oom``: In contrast to fatal the job might be resubmitted if possible according to the job configuration. * ``description``: Just like its ``exit_code`` counterpart, this is an optional description of the regular expression that has matched. 
The following is an example of regular expressions that may be used: @@ -4972,10 +4984,14 @@ prepended with the string ``Fatal: Unknown error encountered``. Note that, if stderr contained ``error``, ``ERROR``, or ``ErRor`` then it would not matter - stderr was not being scanned. -If the second regular expression did not match, then the third regular -expression is checked. The third regular expression does not contain an error -level, so an error level of ``fatal`` is assumed. The third regular expression -also does not contain a source, so both stdout and stderr are checked. The third +If the second regular expression does not match, the regular expression "out of memory" +is checked on stdout. If found, Galaxy tries to resubmit the job with more memory +if configured correctly, otherwise the job fails. + +If the previous regular expressions does not match, then the fourth regular +expression is checked. The fourth regular expression does not contain an error +level, so an error level of ``fatal`` is assumed. The fourth regular expression +also does not contain a source, so both stdout and stderr are checked. The fourth regular expression looks for 12 consecutive "C"s or "G"s in any order and in uppercase or lowercase. If stdout contained ``cgccGGCCcGGcG`` or stderr contained ``CCCCCCgggGGG``, then the regular expression would match, the tool @@ -4983,7 +4999,7 @@ would be marked with a fatal error, and the stream that contained the 12-nucleotide CG island would be prepended with ``Fatal: Fatal error - CG island 12 nts long found``. -Finally, if the tool did not match any of the fatal errors, then the fourth +Finally, if the tool did not match any of the fatal errors, then the fifth regular expression is checked. Since no source is specified, both stdout and stderr are checked. 
If ``Branch A`` is at the beginning of stdout or stderr, then a warning will be registered and the source that contained ``Branch A`` will be @@ -4993,12 +5009,12 @@ prepended with the warning ``Warning: Branch A was taken in execution``. - This tells whether the regular expression should be matched against stdout, stderr, or both. If this attribute is missing or is incorrect, then both stdout and stderr will be checked. The source can be one of the follwing values: + This tells whether the regular expression should be matched against stdout, stderr, or both. If this attribute is missing or is incorrect, then both stdout and stderr will be checked. The source can be one of the following values: - This is the regular expression that will be used to match against stdout and/or stderr. + This is the regular expression that will be used to match against stdout and/or stderr. @@ -5092,12 +5108,12 @@ for a list of supported formats. - Deprecated. + *Deprecated*. - Deprecated. + *Deprecated*. @@ -5337,6 +5353,7 @@ and ``bibtex`` are the only supported options. Documentation for LevelType + diff --git a/scripts/run_galaxy_workflow_tests.sh b/scripts/run_galaxy_workflow_tests.sh index 133e8cf34..40d048fae 100644 --- a/scripts/run_galaxy_workflow_tests.sh +++ b/scripts/run_galaxy_workflow_tests.sh @@ -11,8 +11,7 @@ : ${PLANEMO_SERVE_DATABASE_TYPE:="postgres"} # used if not using Docker with PLANEMO_TEST_STYLE : ${PLANEMO_DOCKER_GALAXY_IMAGE:="quay.io/bgruening/galaxy:18.01"} # used if used Docker with PLANEMO_TEST_STYLE : ${PLANEMO_VIRTUAL_ENV:=".venv"} - -GALAXY_URL="http://localhost:$PLANEMO_SERVE_PORT" +: ${GALAXY_URL:="http://localhost:$PLANEMO_SERVE_PORT"} # Ensure Planemo is installed. if [ ! 
-d "${PLANEMO_VIRTUAL_ENV}" ]; then @@ -102,6 +101,19 @@ elif [ "$PLANEMO_TEST_STYLE" = "manual_docker_run_and_test" ]; then --galaxy_admin_key admin \ --galaxy_user_key admin \ "$1" +elif [ "$PLANEMO_TEST_STYLE" = "external_galaxy" ]; then + if [[ -n $PLANEMO_INSTALL_TOOLS ]]; then + INSTALL_TOOLS=""; + else + INSTALL_TOOLS="--no_shed_install"; + fi + planemo $PLANEMO_OPTIONS test \ + --engine external_galaxy \ + --galaxy_url "$GALAXY_URL" \ + --galaxy_admin_key "$PLANEMO_ADMIN_KEY" \ + --galaxy_user_key "$PLANEMO_USER_KEY" \ + $INSTALL_TOOLS \ + "$1" else echo "Unknown test style ${PLANEMO_TEST_STYLE}" exit 1 diff --git a/setup.py b/setup.py index f441bcfcb..7a5bbfd6e 100644 --- a/setup.py +++ b/setup.py @@ -58,6 +58,7 @@ def get_var(var_name): 'planemo.shed', 'planemo.shed2tap', 'planemo.test', + 'planemo.training', 'planemo.xml', ] ENTRY_POINTS = ''' diff --git a/tests/data/training_datatypes.yaml b/tests/data/training_datatypes.yaml new file mode 100644 index 000000000..ed27c75e3 --- /dev/null +++ b/tests/data/training_datatypes.yaml @@ -0,0 +1,3 @@ +--- +csv: csv +test: strange_datatype \ No newline at end of file diff --git a/tests/data/training_metadata.yaml b/tests/data/training_metadata.yaml new file mode 100644 index 000000000..bfa13b156 --- /dev/null +++ b/tests/data/training_metadata.yaml @@ -0,0 +1,13 @@ +--- +name: test +type: use +title: Test +summary: 'Summary' +edam_ontology: '' +requirements: +- title: Galaxy introduction + type: internal + link: /introduction/ +maintainers: +- maintainer1 +- maintainer2 \ No newline at end of file diff --git a/tests/data/training_query_tabular.json b/tests/data/training_query_tabular.json new file mode 100644 index 000000000..1507c75ab --- /dev/null +++ b/tests/data/training_query_tabular.json @@ -0,0 +1,893 @@ +{ + "model_class": "Tool", + "version": "2.0.0", + "id": "toolshed.g2.bx.psu.edu/repos/iuc/query_tabular/query_tabular/2.0.0", + "inputs": [ + { + "type": "hidden", + "model_class": 
"HiddenToolParameter", + "value": "workdb.sqlite", + "is_dynamic": false, + "refresh_on_change": false, + "label": "", + "name": "workdb", + "argument": null, + "help": "", + "hidden": true, + "optional": false + }, + { + "type": "section", + "expanded": false, + "inputs": [ + { + "type": "data", + "model_class": "DataToolParameter", + "value": null, + "edam": { + "edam_formats": [ + "format_3621" + ], + "edam_data": [ + "data_0006" + ] + }, + "extensions": [ + "sqlite" + ], + "argument": null, + "optional": true, + "hidden": false, + "help": "Make sure your added table names are not already in this database", + "name": "withdb", + "refresh_on_change": true, + "multiple": false, + "label": "Add tables to this Database", + "options": { + "hda": [], + "hdca": [] + }, + "is_dynamic": false + } + ], + "model_class": "Section", + "name": "add_to_database", + "title": "Add tables to an existing database", + "help": null + }, + { + "type": "repeat", + "model_class": "Repeat", + "default": 0, + "max": "__Infinity__", + "inputs": [ + { + "type": "data", + "model_class": "DataToolParameter", + "value": null, + "edam": { + "edam_formats": [ + "format_3475" + ], + "edam_data": [ + "data_0006" + ] + }, + "extensions": [ + "tabular" + ], + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "table", + "refresh_on_change": true, + "multiple": false, + "label": "Tabular Dataset for Table", + "options": { + "hda": [], + "hdca": [] + }, + "is_dynamic": false + }, + { + "type": "section", + "expanded": false, + "inputs": [ + { + "type": "repeat", + "model_class": "Repeat", + "default": 0, + "max": "__Infinity__", + "inputs": [ + { + "type": "conditional", + "model_class": "Conditional", + "name": "filter", + "test_param": { + "type": "select", + "model_class": "SelectToolParameter", + "value": "skip", + "is_dynamic": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "filter_type", + "refresh_on_change": true, + 
"multiple": false, + "textable": true, + "label": "Filter By", + "options": [ + [ + "skip leading lines", + "skip", + false + ], + [ + "comment char", + "comment", + false + ], + [ + "by regex expression matching", + "regex", + false + ], + [ + "select columns", + "select_columns", + false + ], + [ + "regex replace value in column", + "replace", + false + ], + [ + "prepend a line number column", + "prepend_line_num", + false + ], + [ + "append a line number column", + "append_line_num", + false + ], + [ + "prepend a column with the given text", + "prepend_text", + false + ], + [ + "append a column with the given text", + "append_text", + false + ], + [ + "normalize list columns, replicates row for each item in list", + "normalize", + false + ] + ], + "display": null + }, + "cases": [ + { + "model_class": "ConditionalWhen", + "value": "skip", + "inputs": [ + { + "type": "integer", + "model_class": "IntegerToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "help": "Leave blank to use the comment lines metadata for this dataset", + "hidden": false, + "optional": true, + "name": "skip_lines", + "min": 0, + "max": null, + "label": "Skip lines", + "datalist": [], + "is_dynamic": false + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "comment", + "inputs": [ + { + "type": "select", + "model_class": "SelectToolParameter", + "value": "35", + "is_dynamic": false, + "argument": null, + "optional": true, + "hidden": false, + "help": "lines beginning with these are skipped", + "name": "comment_char", + "refresh_on_change": false, + "multiple": true, + "textable": true, + "label": "Ignore lines beginning with these characters", + "options": [ + [ + ">", + "62", + false + ], + [ + "@", + "64", + false + ], + [ + "+", + "43", + false + ], + [ + "<", + "60", + false + ], + [ + "*", + "42", + false + ], + [ + "-", + "45", + false + ], + [ + "=", + "61", + false + ], + [ + "|", + "124", + false + ], + [ + "?", + "63", + 
false + ], + [ + "$", + "36", + false + ], + [ + ".", + "46", + false + ], + [ + ":", + "58", + false + ], + [ + "&", + "38", + false + ], + [ + "%", + "37", + false + ], + [ + "^", + "94", + false + ], + [ + "#", + "35", + true + ], + [ + "!", + "33", + false + ] + ], + "display": "checkboxes" + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "prepend_line_num", + "inputs": [] + }, + { + "model_class": "ConditionalWhen", + "value": "append_line_num", + "inputs": [] + }, + { + "model_class": "ConditionalWhen", + "value": "prepend_text", + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "column_text", + "label": "text for column", + "datalist": [], + "is_dynamic": false + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "append_text", + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "column_text", + "label": "text for column", + "datalist": [], + "is_dynamic": false + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "regex", + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "regex_pattern", + "label": "regex pattern", + "datalist": [], + "is_dynamic": false + }, + { + "type": "select", + "model_class": "SelectToolParameter", + "value": "exclude_match", + "is_dynamic": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "regex_action", + "refresh_on_change": false, + "multiple": false, + "textable": true, + "label": "action for regex match", + "options": [ + [ + "exclude line on pattern 
match", + "exclude_match", + false + ], + [ + "include line on pattern match", + "include_match", + false + ], + [ + "exclude line if pattern found", + "exclude_find", + false + ], + [ + "include line if pattern found", + "include_find", + false + ] + ], + "display": null + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "select_columns", + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "example: 1,4,2 or c1,c4,c2(selects the first,fourth, and second columns)", + "name": "columns", + "label": "enter column numbers to keep", + "datalist": [], + "is_dynamic": false + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "replace", + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "example: 1 or c1 (selects the first column)", + "name": "column", + "label": "enter column number to replace", + "datalist": [], + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "regex_pattern", + "label": "regex pattern", + "datalist": [], + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "regex_replace", + "label": "replacement expression", + "datalist": [], + "is_dynamic": false + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "normalize", + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + 
"argument": null, + "optional": false, + "hidden": false, + "help": "example: 2,4 or c2,c4 (selects the second, and fourth columns) If multiple columns are selected, they should have the same length and separator on each line", + "name": "columns", + "label": "enter column numbers to normalize", + "datalist": [], + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": ",", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "separator", + "label": "List item delimiter in column", + "datalist": [], + "is_dynamic": false + } + ] + } + ] + } + ], + "min": 0, + "name": "linefilters", + "title": "Filter Tabular Input Lines", + "help": null + } + ], + "model_class": "Section", + "name": "input_opts", + "title": "Filter Dataset Input", + "help": null + }, + { + "type": "section", + "expanded": false, + "inputs": [ + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": true, + "hidden": false, + "help": "By default, tables will be named: t1,t2,...,tn (table names must be unique)", + "name": "table_name", + "label": "Specify Name for Table", + "datalist": [], + "is_dynamic": false + }, + { + "type": "boolean", + "model_class": "BooleanToolParameter", + "value": "false", + "refresh_on_change": false, + "argument": null, + "help": "The names will be quoted if they are not valid SQLite column names.", + "hidden": false, + "optional": false, + "name": "column_names_from_first_line", + "falsevalue": "False", + "truevalue": "True", + "label": "Use first line as column names", + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": true, + "hidden": false, + "help": "By default, table columns will be named: c1,c2,c3,...,cn (column names 
for a table must be unique) You can override the default names by entering a comma -separated list of names, e.g. ',name1,,,name2' would rename the second and fifth columns.", + "name": "col_names", + "label": "Specify Column Names (comma-separated list)", + "datalist": [], + "is_dynamic": false + }, + { + "type": "boolean", + "model_class": "BooleanToolParameter", + "value": "false", + "refresh_on_change": false, + "argument": null, + "help": "", + "hidden": false, + "optional": false, + "name": "load_named_columns", + "falsevalue": "", + "truevalue": "load_named_columns", + "label": "Only load the columns you have named into database", + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": true, + "hidden": false, + "help": "Only creates this additional column when a name is entered. (This can not be the same name as any of the other columns in this table.)", + "name": "pkey_autoincr", + "label": "Add an auto increment primary key column with this name", + "datalist": [], + "is_dynamic": false + }, + { + "type": "repeat", + "model_class": "Repeat", + "default": 0, + "max": "__Infinity__", + "inputs": [ + { + "type": "boolean", + "model_class": "BooleanToolParameter", + "value": "false", + "refresh_on_change": false, + "argument": null, + "help": "", + "hidden": false, + "optional": false, + "name": "unique", + "falsevalue": "no", + "truevalue": "yes", + "label": "This is a unique index", + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "Create an index on the column names: e.g. 
for default column names: c1 or c2,c4 ( use the names you gave for columns)", + "name": "index_columns", + "label": "Index on Columns", + "datalist": [], + "is_dynamic": false + } + ], + "min": 0, + "name": "indexes", + "title": "Table Index", + "help": null + } + ], + "model_class": "Section", + "name": "tbl_opts", + "title": "Table Options", + "help": null + } + ], + "min": 0, + "name": "tables", + "title": "Database Table", + "help": null + }, + { + "type": "boolean", + "model_class": "BooleanToolParameter", + "value": "false", + "refresh_on_change": false, + "argument": null, + "help": "SQLite to tabular tool can run additional queries on this database", + "hidden": false, + "optional": false, + "name": "save_db", + "falsevalue": "no", + "truevalue": "yes", + "label": "Save the sqlite database in your history", + "is_dynamic": false + }, + { + "type": "text", + "model_class": "TextToolParameter", + "value": "", + "refresh_on_change": false, + "area": true, + "argument": null, + "optional": true, + "hidden": false, + "help": "By default: tables are named: t1,t2,...,tn and columns in each table: c1,c2,...,cn", + "name": "sqlquery", + "label": "SQL Query to generate tabular output", + "datalist": [], + "is_dynamic": false + }, + { + "type": "conditional", + "model_class": "Conditional", + "name": "query_result", + "test_param": { + "type": "select", + "model_class": "SelectToolParameter", + "value": "yes", + "is_dynamic": false, + "argument": null, + "optional": false, + "hidden": false, + "help": "", + "name": "header", + "refresh_on_change": true, + "multiple": false, + "textable": true, + "label": "include query result column headers", + "options": [ + [ + "Yes", + "yes", + false + ], + [ + "No", + "no", + false + ] + ], + "display": null + }, + "cases": [ + { + "model_class": "ConditionalWhen", + "value": "yes", + "inputs": [ + { + "type": "select", + "model_class": "SelectToolParameter", + "value": "35", + "is_dynamic": false, + "argument": null, + 
"optional": true, + "hidden": false, + "help": "", + "name": "header_prefix", + "refresh_on_change": false, + "multiple": false, + "textable": true, + "label": "Prefix character for column_header line", + "options": [ + [ + "no comment character prefix", + "", + false + ], + [ + ">", + "62", + false + ], + [ + "@", + "64", + false + ], + [ + "+", + "43", + false + ], + [ + "<", + "60", + false + ], + [ + "*", + "42", + false + ], + [ + "-", + "45", + false + ], + [ + "=", + "61", + false + ], + [ + "|", + "124", + false + ], + [ + "?", + "63", + false + ], + [ + "$", + "36", + false + ], + [ + ".", + "46", + false + ], + [ + ":", + "58", + false + ], + [ + "&", + "38", + false + ], + [ + "%", + "37", + false + ], + [ + "^", + "94", + false + ], + [ + "#", + "35", + true + ], + [ + "!", + "33", + false + ] + ], + "display": null + } + ] + }, + { + "model_class": "ConditionalWhen", + "value": "no", + "inputs": [] + } + ] + } + ], + "outputs": [ + { + "label": "sqlite db of ${on_string}", + "model_class": "ToolOutput", + "name": "sqlitedb", + "format": "sqlite", + "edam_format": "format_3621", + "hidden": false, + "edam_data": "data_0006" + }, + { + "label": "query results on ${on_string}", + "model_class": "ToolOutput", + "name": "output", + "format": "tabular", + "edam_format": "format_3475", + "hidden": false, + "edam_data": "data_0006" + } + ], + "edam_operations": [], + "description": "using sqlite sql", + "panel_section_id": "proteomics", + "panel_section_name": "Proteomics", + "labels": [], + "tool_shed_repository": { + "name": "query_tabular", + "owner": "iuc", + "changeset_revision": "1ea4e668bf73", + "tool_shed": "toolshed.g2.bx.psu.edu" + }, + "name": "Query Tabular", + "form_style": "regular", + "edam_topics": [] +} \ No newline at end of file diff --git a/tests/data/training_tutorial.md b/tests/data/training_tutorial.md new file mode 100644 index 000000000..f0855ce2e --- /dev/null +++ b/tests/data/training_tutorial.md @@ -0,0 +1,35 @@ +--- +layout: 
tutorial_hands_on + +title: "A tutorial to test" +zenodo_link: "https://zenodo.org/record/1321885" +questions: + - "What is the purpose of the tutorial?" +objectives: + - "A learning objective" + - "Analysis of differentially expressed genes" + - "Identification of functional enrichment among differentially expressed genes" +time_estimation: "1H" +key_points: + - "Take home message" +contributors: + - the_best_contributor +--- + +# Introduction +{:.no_toc} + +The introduction + +> ### Agenda +> +> In this tutorial, we will deal with: +> +> 1. TOC +> {:toc} +> +{: .agenda} + +# First section + +# Second section \ No newline at end of file diff --git a/tests/data/training_tutorial_wo_zenodo.md b/tests/data/training_tutorial_wo_zenodo.md new file mode 100644 index 000000000..40c1b5df6 --- /dev/null +++ b/tests/data/training_tutorial_wo_zenodo.md @@ -0,0 +1,35 @@ +--- +layout: tutorial_hands_on + +title: "A tutorial to test" +zenodo_link: "" +questions: + - "What is the purpose of the tutorial?" +objectives: + - "A learning objective" + - "Analysis of differentially expressed genes" + - "Identification of functional enrichment among differentially expressed genes" +time_estimation: "1H" +key_points: + - "Take home message" +contributors: + - the_best_contributor +--- + +# Introduction +{:.no_toc} + +The introduction + +> ### Agenda +> +> In this tutorial, we will deal with: +> +> 1. 
TOC +> {:toc} +> +{: .agenda} + +# First section + +# Second section \ No newline at end of file diff --git a/tests/data/training_wf_param_values.json b/tests/data/training_wf_param_values.json new file mode 100644 index 000000000..80cff6d63 --- /dev/null +++ b/tests/data/training_wf_param_values.json @@ -0,0 +1,65 @@ +{ + "save_db": "false", + "add_to_database": { + "withdb": { + "output_name": "output", + "id": 0 + } + }, + "workdb": "workdb.sqlite", + "__rerun_remap_job_id__": null, + "__page__": null, + "tables": [ + { + "tbl_opts": { + "pkey_autoincr": "", + "column_names_from_first_line": "false", + "table_name": "", + "indexes": [], + "load_named_columns": "false", + "col_names": "" + }, + "__index__": 0, + "input_opts": { + "linefilters": [ + { + "__index__": 0, + "filter": { + "filter_type": "skip", + "skip_lines": "1", + "__current_case__": 0 + } + } + ] + }, + "table": { + "output_name": "output", + "id": 1 + } + }, + { + "tbl_opts": { + "pkey_autoincr": "", + "column_names_from_first_line": "false", + "table_name": "", + "indexes": [], + "load_named_columns": "false", + "col_names": "" + }, + "__index__": 1, + "input_opts": { + "linefilters": [] + }, + "table": { + "output_name": "output", + "id": 2 + } + } + ], + "query_result": { + "header_prefix": "38", + "header": "yes", + "__current_case__": 0 + }, + "sqlquery": "" +} \ No newline at end of file diff --git a/tests/data/training_workflow.ga b/tests/data/training_workflow.ga new file mode 100644 index 000000000..d1840921f --- /dev/null +++ b/tests/data/training_workflow.ga @@ -0,0 +1,230 @@ +{ + "uuid": "82ae273d-dd17-4f97-a286-8c2c0d56a6c9", + "tags": [], + "format-version": "0.1", + "name": "Test training workflow", + "steps": { + "0": { + "tool_id": null, + "tool_version": null, + "outputs": [], + "workflow_outputs": [], + "input_connections": {}, + "tool_state": "{\"collection_type\": \"list\"}", + "id": 0, + "uuid": "72575fe1-340c-41dd-8347-8ac6ead7a981", + "errors": null, + "name": "Input 
dataset collection", + "label": null, + "inputs": [], + "position": { + "top": 224, + "left": 198.5 + }, + "annotation": "", + "content_id": null, + "type": "data_collection_input" + }, + "1": { + "tool_id": null, + "tool_version": null, + "outputs": [], + "workflow_outputs": [], + "input_connections": {}, + "tool_state": "{}", + "id": 1, + "uuid": "6b1638b8-97ee-465a-a5a2-2d4346c33c80", + "errors": null, + "name": "Input dataset", + "label": null, + "inputs": [], + "position": { + "top": 296, + "left": 234.5 + }, + "annotation": "", + "content_id": null, + "type": "data_input" + }, + "2": { + "tool_id": null, + "tool_version": null, + "outputs": [], + "workflow_outputs": [], + "input_connections": {}, + "tool_state": "{}", + "id": 2, + "uuid": "ab3473ed-778b-4dc0-baaa-d9c3b68faa95", + "errors": null, + "name": "Input dataset", + "label": null, + "inputs": [], + "position": { + "top": 388, + "left": 228 + }, + "annotation": "", + "content_id": null, + "type": "data_input" + }, + "3": { + "tool_id": "toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.71", + "tool_version": "0.71", + "outputs": [ + { + "type": "html", + "name": "html_file" + }, + { + "type": "txt", + "name": "text_file" + } + ], + "workflow_outputs": [], + "input_connections": { + "contaminants": { + "output_name": "output", + "id": 1 + }, + "input_file": { + "output_name": "output", + "id": 0 + } + }, + "tool_state": "{\"__page__\": null, \"contaminants\": \"{\\\"__class__\\\": \\\"RuntimeValue\\\"}\", \"__rerun_remap_job_id__\": null, \"limits\": \"{\\\"__class__\\\": \\\"RuntimeValue\\\"}\", \"input_file\": \"{\\\"__class__\\\": \\\"RuntimeValue\\\"}\"}", + "id": 3, + "tool_shed_repository": { + "owner": "devteam", + "changeset_revision": "ff9530579d1f", + "name": "fastqc", + "tool_shed": "toolshed.g2.bx.psu.edu" + }, + "uuid": "e0c41ba6-03a9-4ff3-8ab4-9f5dd8125e4c", + "errors": null, + "name": "FastQC", + "post_job_actions": {}, + "label": null, + "inputs": [ + { + "name": "contaminants", + 
"description": "runtime parameter for tool FastQC" + }, + { + "name": "limits", + "description": "runtime parameter for tool FastQC" + }, + { + "name": "input_file", + "description": "runtime parameter for tool FastQC" + } + ], + "position": { + "top": 144, + "left": 514.5 + }, + "annotation": "", + "content_id": "toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.71", + "type": "tool" + }, + "4": { + "tool_id": "toolshed.g2.bx.psu.edu/repos/iuc/query_tabular/query_tabular/2.0.0", + "tool_version": "2.0.0", + "outputs": [ + { + "type": "sqlite", + "name": "sqlitedb" + }, + { + "type": "tabular", + "name": "output" + } + ], + "workflow_outputs": [], + "input_connections": { + "tables_1|table": { + "output_name": "output", + "id": 2 + }, + "add_to_database|withdb": { + "output_name": "output", + "id": 0 + }, + "tables_0|table": { + "output_name": "output", + "id": 1 + } + }, + "tool_state": "{\"tables\": \"[{\\\"tbl_opts\\\": {\\\"pkey_autoincr\\\": \\\"\\\", \\\"load_named_columns\\\": \\\"false\\\", \\\"indexes\\\": [], \\\"table_name\\\": \\\"\\\", \\\"col_names\\\": \\\"\\\", \\\"column_names_from_first_line\\\": \\\"false\\\"}, \\\"__index__\\\": 0, \\\"input_opts\\\": {\\\"linefilters\\\": [{\\\"filter\\\": {\\\"skip_lines\\\": \\\"1\\\", \\\"__current_case__\\\": 0, \\\"filter_type\\\": \\\"skip\\\"}, \\\"__index__\\\": 0}]}, \\\"table\\\": {\\\"__class__\\\": \\\"RuntimeValue\\\"}}, {\\\"tbl_opts\\\": {\\\"pkey_autoincr\\\": \\\"\\\", \\\"load_named_columns\\\": \\\"false\\\", \\\"indexes\\\": [], \\\"table_name\\\": \\\"\\\", \\\"col_names\\\": \\\"\\\", \\\"column_names_from_first_line\\\": \\\"false\\\"}, \\\"__index__\\\": 1, \\\"input_opts\\\": {\\\"linefilters\\\": []}, \\\"table\\\": {\\\"__class__\\\": \\\"RuntimeValue\\\"}}]\", \"save_db\": \"\\\"false\\\"\", \"__page__\": null, \"__rerun_remap_job_id__\": null, \"sqlquery\": \"\\\"\\\"\", \"add_to_database\": \"{\\\"withdb\\\": {\\\"__class__\\\": \\\"RuntimeValue\\\"}}\", \"query_result\": 
\"{\\\"header\\\": \\\"yes\\\", \\\"header_prefix\\\": \\\"38\\\", \\\"__current_case__\\\": 0}\", \"workdb\": \"\\\"workdb.sqlite\\\"\"}", + "id": 4, + "tool_shed_repository": { + "owner": "iuc", + "changeset_revision": "1ea4e668bf73", + "name": "query_tabular", + "tool_shed": "toolshed.g2.bx.psu.edu" + }, + "uuid": "e09d110a-526a-4dea-b58f-0c03ae0287f1", + "errors": null, + "name": "Query Tabular", + "post_job_actions": {}, + "label": null, + "inputs": [ + { + "name": "add_to_database", + "description": "runtime parameter for tool Query Tabular" + } + ], + "position": { + "top": 353, + "left": 519 + }, + "annotation": "", + "content_id": "toolshed.g2.bx.psu.edu/repos/iuc/query_tabular/query_tabular/2.0.0", + "type": "tool" + }, + "5": { + "tool_id": "toolshed.g2.bx.psu.edu/repos/bgruening/text_processing/tp_head_tool/1.1.0", + "tool_version": "1.1.0", + "outputs": [ + { + "type": "input", + "name": "outfile" + } + ], + "workflow_outputs": [], + "input_connections": { + "infile": { + "output_name": "output", + "id": 4 + } + }, + "tool_state": "{\"count\": \"\\\"10\\\"\", \"__page__\": null, \"complement\": \"\\\"\\\"\", \"__rerun_remap_job_id__\": null, \"infile\": \"{\\\"__class__\\\": \\\"RuntimeValue\\\"}\"}", + "id": 5, + "tool_shed_repository": { + "owner": "bgruening", + "changeset_revision": "74a8bef53a00", + "name": "text_processing", + "tool_shed": "toolshed.g2.bx.psu.edu" + }, + "uuid": "732d789d-e3e2-4d5e-bd28-257e6be0602b", + "errors": null, + "name": "Select first", + "post_job_actions": {}, + "label": null, + "inputs": [ + { + "name": "infile", + "description": "runtime parameter for tool Select first" + } + ], + "position": { + "top": 416, + "left": 775.5 + }, + "annotation": "", + "content_id": "toolshed.g2.bx.psu.edu/repos/bgruening/text_processing/tp_head_tool/1.1.0", + "type": "tool" + } + }, + "annotation": "", + "a_galaxy_workflow": "true" +} \ No newline at end of file diff --git a/tests/data/tutorial.md b/tests/data/tutorial.md new file mode 
100644 index 000000000..96553f63f --- /dev/null +++ b/tests/data/tutorial.md @@ -0,0 +1,35 @@ +--- +layout: tutorial_hands_on + +title: "A tutorial to test" +zenodo_link: "https://zenodo.org" +questions: + - "What is the purpose of the tutorial?" +objectives: + - "A learning objective" + - "Analysis of differentially expressed genes" + - "Identification of functional enrichment among differentially expressed genes" +time_estimation: "1H" +key_points: + - "Take home message" +contributors: + - the_best_contributor +--- + +# Introduction +{:.no_toc} + +The introduction + +> ### Agenda +> +> In this tutorial, we will deal with: +> +> 1. TOC +> {:toc} +> +{: .agenda} + +# First section + +# Second section \ No newline at end of file diff --git a/tests/test_cmd_serve.py b/tests/test_cmd_serve.py index 3e2b32f80..a437b2679 100644 --- a/tests/test_cmd_serve.py +++ b/tests/test_cmd_serve.py @@ -70,7 +70,7 @@ def test_shed_serve(self): tool_ids = None for i in range(30): tool_ids = [t["id"] for t in user_gi.tools.get_tools()] - if "toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.71" in tool_ids: + if any(_.startswith("toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/") for _ in tool_ids): found = True break time.sleep(5) diff --git a/tests/test_cmd_training_fill_data_library.py b/tests/test_cmd_training_fill_data_library.py new file mode 100644 index 000000000..5f19ab4bc --- /dev/null +++ b/tests/test_cmd_training_fill_data_library.py @@ -0,0 +1,81 @@ +"""Tests for the ``training_fill_data_library`` command.""" +import os +import shutil + +from .test_cmd_training_generate_from_wf import create_tutorial_dir +from .test_utils import ( + CliTestCase, + TEST_DATA_DIR +) + + +class CmdTrainingFillDataLibraryTestCase(CliTestCase): + """Container class defining test cases for the ``training_fill_data_library`` command.""" + + def test_training_fill_data_library_command_empty(self): + """Test training_fill_data_library command with no arguments.""" + with self._isolate(): + 
training_fill_data_library_command = [ + "training_fill_data_library" + ] + self._check_exit_code(training_fill_data_library_command, exit_code=2) + + def test_training_fill_data_library_command_topic(self): + """Test training_fill_data_library command with only topic name.""" + with self._isolate(): + training_fill_data_library_command = [ + "training_fill_data_library", + "--topic_name", "test" + ] + self._check_exit_code(training_fill_data_library_command, exit_code=2) + + def test_training_fill_data_library_command_tutorial_topic(self): + """Test training_fill_data_library command with tutorial name.""" + with self._isolate(): + topic_n = "test" + tuto_n = "test" + datatype = os.path.join(TEST_DATA_DIR, "training_datatypes.yaml") + # not working + create_tutorial_dir(topic_n, tuto_n) + training_fill_data_library_command = [ + "training_fill_data_library", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--datatypes", datatype + ] + shutil.rmtree("topics") + self._check_exit_code(training_fill_data_library_command, exit_code=-1) + # working + create_tutorial_dir(topic_n, tuto_n) + training_fill_data_library_command = [ + "training_fill_data_library", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--datatypes", datatype + ] + self._check_exit_code(training_fill_data_library_command, exit_code=0) + + def test_training_fill_data_library_command_tutorial_zenodo(self): + """Test training_fill_data_library command with zenodo link.""" + with self._isolate(): + topic_n = "test" + tuto_n = "test" + create_tutorial_dir(topic_n, tuto_n) + datatype = os.path.join(TEST_DATA_DIR, "training_datatypes.yaml") + # not working test + training_fill_data_library_command = [ + "training_fill_data_library", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--zenodo_link", "https://zenodo.org/record/1321885" + ] + self._check_exit_code(training_fill_data_library_command, exit_code=-1) + # working + training_fill_data_library_command = [ + 
"training_fill_data_library", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--zenodo_link", "https://zenodo.org/record/1321885", + "--datatypes", datatype + ] + self._check_exit_code(training_fill_data_library_command, exit_code=0) diff --git a/tests/test_cmd_training_generate_from_wf.py b/tests/test_cmd_training_generate_from_wf.py new file mode 100644 index 000000000..4b9619bd6 --- /dev/null +++ b/tests/test_cmd_training_generate_from_wf.py @@ -0,0 +1,89 @@ +"""Tests for the ``training_generate_from_wf`` command.""" +import os +import shutil + +from .test_utils import ( + CliTestCase, + TEST_DATA_DIR +) + + +def create_tutorial_dir(topic_n, tuto_n): + """Create the tutorial directory structure.""" + topic_dir = os.path.join("topics", topic_n) + tuto_dir = os.path.join(topic_dir, "tutorials", tuto_n) + metadata_path = os.path.join(topic_dir, "metadata.yaml") + if not os.path.isdir(topic_dir): + os.makedirs(topic_dir) + if not os.path.isdir(tuto_dir): + os.makedirs(tuto_dir) + if not os.path.exists(metadata_path): + metadata = os.path.join(TEST_DATA_DIR, "training_metadata.yaml") + shutil.copy(metadata, metadata_path) + shutil.copy( + os.path.join(TEST_DATA_DIR, "training_tutorial.md"), + os.path.join(tuto_dir, "tutorial.md")) + + +class CmdTrainingGenerateFromWfTestCase(CliTestCase): + """Container class defining test cases for the ``training_generate_from_wf`` command.""" + + def test_training_generate_from_wf_command_empty(self): + """Test training_generate_from_wf command with no arguments.""" + with self._isolate(): + training_fill_data_library_command = [ + "training_generate_from_wf" + ] + self._check_exit_code(training_fill_data_library_command, exit_code=2) + + def test_training_generate_from_wf_command_topic(self): + """Test training_generate_from_wf command with only topic name.""" + with self._isolate(): + training_fill_data_library_command = [ + "training_generate_from_wf", + "--topic_name", "test" + ] + 
self._check_exit_code(training_fill_data_library_command, exit_code=2) + + def test_training_generate_from_wf_command_local_wf(self): + """Test training_generate_from_wf command with local workflow.""" + with self._isolate(): + topic_n = "test" + tuto_n = "test" + test_workflow = os.path.join(TEST_DATA_DIR, "test_workflow_1.ga") + # working test + create_tutorial_dir(topic_n, tuto_n) + training_init_command = [ + "training_generate_from_wf", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--workflow", test_workflow + ] + self._check_exit_code(training_init_command, exit_code=0) + shutil.rmtree("topics") + + def test_training_generate_from_wf_command_remote_wf(self): + """Test training_generate_from_wf command with workflow on running instance.""" + with self._isolate(): + topic_n = "test" + tuto_n = "test" + # not working test + training_init_command = [ + "training_generate_from_wf", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--workflow_id", "ID" + ] + self._check_exit_code(training_init_command, exit_code=-1) + # not working test + create_tutorial_dir(topic_n, tuto_n) + training_init_command = [ + "training_generate_from_wf", + "--topic_name", topic_n, + "--tutorial_name", tuto_n, + "--workflow_id", "ID", + "--galaxy_url", "https://usegalaxy.eu/", + "--galaxy_api_key", "API" + ] + self._check_exit_code(training_init_command, exit_code=-1) + shutil.rmtree("topics") diff --git a/tests/test_cmd_training_init.py b/tests/test_cmd_training_init.py new file mode 100644 index 000000000..71bb17f27 --- /dev/null +++ b/tests/test_cmd_training_init.py @@ -0,0 +1,123 @@ +"""Tests for the ``training_init`` command.""" +import os + +from .test_utils import ( + CliTestCase, + TEST_DATA_DIR +) + + +class CmdTrainingInitTestCase(CliTestCase): + """Container class defining test cases for the ``training_init`` command.""" + + def test_training_init_command_by_default(self): + """Test training_init command with only topic name.""" + with self._isolate(): + 
training_init_command = [ + "training_init", + "--topic_name", "test" + ] + self._check_exit_code(training_init_command, exit_code=0) + + def test_training_init_command_topic(self): + """Test training_init command to create new topic.""" + with self._isolate(): + # working test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--topic_title", "Topic title", + "--topic_target", "use", + "--topic_summary", "Summary" + ] + self._check_exit_code(training_init_command, exit_code=0) + # failing test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--topic_title", "Topic title", + "--topic_target", "test", + "--topic_summary", "Summary" + ] + self._check_exit_code(training_init_command, exit_code=2) + + def test_training_init_command_tutorial_no_topic(self): + """Test training_init command with tutorial but no topic.""" + with self._isolate(): + # working test + training_init_command = [ + "training_init", + "--tutorial_name", "test" + ] + self._check_exit_code(training_init_command, exit_code=2) + + def test_training_init_command_tutorial(self): + """Test training_init command to create new tutorial.""" + with self._isolate(): + # working test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--tutorial_name", "test", + "--tutorial_title", "Title of the tutorial", + "--hands_on", + "--slides" + ] + self._check_exit_code(training_init_command, exit_code=0) + + def test_training_init_command_tutorial_zenodo(self): + """Test training_init command to create new tutorial with zenodo.""" + with self._isolate(): + datatype = os.path.join(TEST_DATA_DIR, "training_datatypes.yaml") + # not working test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--tutorial_name", "test", + "--zenodo_link", "https://zenodo.org/record/1321885" + ] + self._check_exit_code(training_init_command, exit_code=-1) + # working + training_init_command = [ + "training_init", + "--topic_name", "test", 
+ "--tutorial_name", "test", + "--zenodo_link", "https://zenodo.org/record/1321885", + "--datatypes", datatype + ] + self._check_exit_code(training_init_command, exit_code=0) + + def test_training_init_command_tutorial_local_wf(self): + """Test training_init command to create new tutorial with local workflow.""" + with self._isolate(): + test_workflow = os.path.join(TEST_DATA_DIR, "test_workflow_1.ga") + # working test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--tutorial_name", "test", + "--workflow", test_workflow + ] + self._check_exit_code(training_init_command, exit_code=0) + + def test_training_init_command_tutorial_remote_wf(self): + """Test training_init command to create new tutorial with workflow on running instance.""" + with self._isolate(): + # not working test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--tutorial_name", "test", + "--workflow_id", "ID" + ] + self._check_exit_code(training_init_command, exit_code=-1) + # working test + training_init_command = [ + "training_init", + "--topic_name", "test", + "--tutorial_name", "test", + "--workflow_id", "ID", + "--galaxy_url", "https://usegalaxy.eu/", + "--galaxy_api_key", "API" + ] + self._check_exit_code(training_init_command, exit_code=0) diff --git a/tests/test_training.py b/tests/test_training.py new file mode 100644 index 000000000..66bf183a2 --- /dev/null +++ b/tests/test_training.py @@ -0,0 +1,243 @@ +"""Training training functions.""" +import json +import os +import shutil + +from nose.tools import assert_raises_regexp + +from planemo import cli +from planemo.runnable import for_path +from planemo.training import Training +from .test_utils import ( + skip_if_environ, + TEST_DATA_DIR, +) + + +datatype_fp = os.path.join(TEST_DATA_DIR, "training_datatypes.yaml") +tuto_fp = os.path.join(TEST_DATA_DIR, "training_tutorial.md") +tuto_wo_zenodo_fp = os.path.join(TEST_DATA_DIR, "training_tutorial_wo_zenodo.md") +zenodo_link = 
'https://zenodo.org/record/1321885' +# load a workflow generated from Galaxy +WF_FP = os.path.join(TEST_DATA_DIR, "training_workflow.ga") +with open(WF_FP, "r") as wf_f: + wf = json.load(wf_f) +# load wf_param_values (output of tutorial.get_wf_param_values on wf['steps']['4']) +with open(os.path.join(TEST_DATA_DIR, "training_wf_param_values.json"), "r") as wf_param_values_f: + wf_param_values = json.load(wf_param_values_f) +# configuration +RUNNABLE = for_path(WF_FP) +CTX = cli.Context() +CTX.planemo_directory = "/tmp/planemo-test-workspace" +KWDS = { + 'topic_name': 'my_new_topic', + 'topic_title': "New topic", + 'topic_target': "use", + 'topic_summary': "Topic summary", + 'tutorial_name': "new_tuto", + 'tutorial_title': "Title of tuto", + 'hands_on': True, + 'slides': True, + 'workflow': None, + 'workflow_id': None, + 'zenodo_link': None, + 'datatypes': os.path.join(TEST_DATA_DIR, "training_datatypes.yaml"), + 'templates': None, + # planemo configuation + 'conda_auto_init': True, + 'conda_auto_install': True, + 'conda_copy_dependencies': False, + 'conda_debug': False, + 'conda_dependency_resolution': False, + 'conda_ensure_channels': 'iuc,bioconda,conda-forge,defaults', + 'conda_exec': None, + 'conda_prefix': None, + 'conda_use_local': False, + 'brew_dependency_resolution': False, + 'daemon': False, + 'database_connection': None, + 'database_type': 'auto', + 'dependency_resolvers_config_file': None, + 'docker': False, + 'docker_cmd': 'docker', + 'docker_extra_volume': None, + 'docker_galaxy_image': 'quay.io/bgruening/galaxy', + 'docker_host': None, + 'docker_sudo': False, + 'docker_sudo_cmd': 'sudo', + 'engine': 'galaxy', + 'extra_tools': (), + 'file_path': None, + 'galaxy_api_key': None, + 'galaxy_branch': None, + 'galaxy_database_seed': None, + 'galaxy_email': 'planemo@galaxyproject.org', + 'galaxy_root': None, + 'galaxy_single_user': True, + 'galaxy_source': None, + 'galaxy_url': None, + 'host': '127.0.0.1', + 'ignore_dependency_problems': False, + 
'install_galaxy': False, + 'job_config_file': None, + 'mulled_containers': False, + 'no_cleanup': False, + 'no_cache_galaxy': False, + 'no_dependency_resolution': True, + 'non_strict_cwl': False, + 'pid_file': None, + 'port': '9090', + 'postgres_database_host': None, + 'postgres_database_port': None, + 'postgres_database_user': 'postgres', + 'postgres_psql_path': 'psql', + 'profile': None, + 'shed_dependency_resolution': False, + 'shed_install': True, + 'shed_tool_conf': None, + 'shed_tool_path': None, + 'skip_venv': False, + 'test_data': None, + 'tool_data_table': None, + 'tool_dependency_dir': None +} + + +def test_training_init(): + """Test :func:`planemo.training.Training.init`.""" + train = Training(KWDS) + assert train.topics_dir == "topics" + assert train.topic is not None + assert train.tuto is None + + +def test_training_init_training(): + """Test :func:`planemo.training.Training.init_training`.""" + train = Training(KWDS) + # new topic, nothing else + train.kwds['tutorial_name'] = None + train.kwds['slides'] = None + train.kwds['workflow'] = None + train.kwds['workflow_id'] = None + train.kwds['zenodo_link'] = None + train.init_training(CTX) + assert os.path.exists(train.topic.dir) + assert not os.listdir(os.path.join(train.topic.dir, 'tutorials')) + # no new topic, no tutorial name but hands-on + train.kwds['slides'] = True + exp_exception = "A tutorial name is needed to create the skeleton of a tutorial slide deck" + with assert_raises_regexp(Exception, exp_exception): + train.init_training(CTX) + # no new topic, no tutorial name but workflow + train.kwds['workflow'] = WF_FP + train.kwds['slides'] = False + exp_exception = "A tutorial name is needed to create the skeleton of the tutorial from a workflow" + with assert_raises_regexp(Exception, exp_exception): + train.init_training(CTX) + # no new topic, no tutorial name but zenodo + train.kwds['workflow'] = None + train.kwds['zenodo_link'] = zenodo_link + exp_exception = "A tutorial name is needed to add 
Zenodo information" + with assert_raises_regexp(Exception, exp_exception): + train.init_training(CTX) + # no new topic, new tutorial + train.kwds['tutorial_name'] = "new_tuto" + train.kwds['workflow'] = None + train.kwds['zenodo_link'] = None + train.init_training(CTX) + assert os.path.exists(train.tuto.dir) + assert os.path.exists(train.tuto.tuto_fp) + assert train.kwds['tutorial_title'] in open(train.tuto.tuto_fp, 'r').read() + # clean after + shutil.rmtree(train.topics_dir) + shutil.rmtree("metadata") + + +def create_existing_tutorial(exit_tuto_name, tuto_fp, topic): + exist_tuto_dir = os.path.join(topic.dir, 'tutorials', exit_tuto_name) + os.makedirs(exist_tuto_dir) + shutil.copyfile(tuto_fp, os.path.join(exist_tuto_dir, 'tutorial.md')) + + +def test_training_check_topic_init_tuto(): + """Test :func:`planemo.training.Training.check_topic_init_tuto`.""" + train = Training(KWDS) + # no topic + exp_exception = "The topic my_new_topic does not exists. It should be created" + with assert_raises_regexp(Exception, exp_exception): + train.check_topic_init_tuto() + # add topic + train.kwds['tutorial_name'] = None + train.kwds['slides'] = None + train.kwds['workflow'] = None + train.kwds['workflow_id'] = None + train.kwds['zenodo_link'] = None + train.init_training(CTX) + train.kwds['tutorial_name'] = 'existing_tutorial' + create_existing_tutorial('existing_tutorial', tuto_fp, train.topic) + train.check_topic_init_tuto() + assert train.tuto.name == train.kwds['tutorial_name'] + assert train.tuto.datatype_fp + # clean after + shutil.rmtree(train.topics_dir) + shutil.rmtree("metadata") + + +def test_fill_data_library(): + """Test :func:`planemo.training.fill_data_library`.""" + train = Training(KWDS) + train.kwds['tutorial_name'] = None + train.kwds['slides'] = False + train.kwds['hands_on'] = False + train.init_training(CTX) + train.kwds['tutorial_name'] = 'existing_tutorial' + create_existing_tutorial('existing_tutorial', tuto_wo_zenodo_fp, train.topic) + # no Zenodo 
link + train.kwds['zenodo_link'] = None + exp_exception = "A Zenodo link should be provided either in the metadata file or as argument of the command" + with assert_raises_regexp(Exception, exp_exception): + train.fill_data_library(CTX) + # with a given Zenodo link and no Zenodo in metadata + train.kwds['zenodo_link'] = zenodo_link + train.fill_data_library(CTX) + assert 'DOI: 10.5281/zenodo.1321885' in open(train.tuto.data_lib_fp, 'r').read() + assert 'zenodo_link: %s' % zenodo_link in open(train.tuto.tuto_fp, 'r').read() + # with a given Zenodo link and Zenodo in metadata + new_z_link = 'https://zenodo.org/record/1324204' + train.kwds['zenodo_link'] = new_z_link + train.tuto = None + train.fill_data_library(CTX) + assert 'DOI: 10.5281/zenodo.1324204' in open(train.tuto.data_lib_fp, 'r').read() + assert 'zenodo_link: %s' % new_z_link in open(train.tuto.tuto_fp, 'r').read() + # with no given Zenodo link + train.kwds['zenodo_link'] = None + train.fill_data_library(CTX) + assert 'DOI: 10.5281/zenodo.1324204' in open(train.tuto.data_lib_fp, 'r').read() + assert 'zenodo_link: %s' % new_z_link in open(train.tuto.tuto_fp, 'r').read() + # clean after + shutil.rmtree(train.topics_dir) + shutil.rmtree("metadata") + + +@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS") +def test_generate_tuto_from_wf(): + """Test :func:`planemo.training.generate_tuto_from_wf`.""" + train = Training(KWDS) + train.kwds['tutorial_name'] = None + train.kwds['slides'] = False + train.init_training(CTX) + train.kwds['tutorial_name'] = 'existing_tutorial' + create_existing_tutorial('existing_tutorial', tuto_fp, train.topic) + # no workflow + train.kwds['workflow'] = None + exp_exception = "A path to a local workflow or the id of a workflow on a running Galaxy instance should be provided" + with assert_raises_regexp(Exception, exp_exception): + train.generate_tuto_from_wf(CTX) + # with workflow + train.kwds['workflow'] = WF_FP + train.generate_tuto_from_wf(CTX) + assert '**FastQC** {% icon tool %} with 
the following parameters:' in open(train.tuto.tuto_fp, 'r').read() + assert os.path.exists(train.tuto.wf_fp) + # clean after + shutil.rmtree(train.topics_dir) + shutil.rmtree("metadata") diff --git a/tests/test_training_tool_input.py b/tests/test_training_tool_input.py new file mode 100644 index 000000000..355bd4691 --- /dev/null +++ b/tests/test_training_tool_input.py @@ -0,0 +1,292 @@ +"""Training:tool_input functions.""" +import json +import os + +from nose.tools import assert_raises_regexp + +from planemo.training.tool_input import ( + get_empty_input, + get_empty_param, + get_input_tool_name, + ToolInput +) +from .test_training import ( + wf, + wf_param_values +) +from .test_utils import ( + TEST_DATA_DIR +) + + +wf_steps = wf['steps'] +# load the output from +# gi.tools.show_tool('toolshed.g2.bx.psu.edu/repos/iuc/query_tabular/query_tabular/2.0.0', io_details=True) +with open(os.path.join(TEST_DATA_DIR, "training_query_tabular.json"), "r") as tool_desc_f: + tool_desc = json.load(tool_desc_f) +tool_inp_desc = tool_desc["inputs"] + + +def test_get_input_tool_name(): + """Test :func:`planemo.training.tool_input.get_input_tool_name`.""" + assert "Input dataset" in get_input_tool_name('1', wf_steps) + assert "output of" in get_input_tool_name('4', wf_steps) + assert get_input_tool_name('10', wf_steps) == '' + + +def test_get_empty_input(): + """Test :func:`planemo.training.tool_input.get_empty_input`.""" + assert '{% icon param-file %} *"Input file"*: File' in get_empty_input() + + +def test_get_empty_param(): + """Test :func:`planemo.training.tool_input.get_empty_param`.""" + assert '*"Parameter"*: `a value`' in get_empty_param() + + +def test_ToolInput_init(): + """Test :func:`planemo.training.tool_input.ToolInput.init`.""" + # test type exception + exp_exception = "No type for the parameter t" + with assert_raises_regexp(Exception, exp_exception): + ToolInput( + tool_inp_desc={'name': 't'}, + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + 
should_be_there=False, + force_default=False) + # test with param not in workflow and exception + exp_exception = "t not in workflow" + with assert_raises_regexp(Exception, exp_exception): + ToolInput( + tool_inp_desc={'name': 't', 'type': ''}, + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + # test with param not in workflow but no exception + tool_input = ToolInput( + tool_inp_desc={'name': 't', 'type': ''}, + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + assert "save_db" in tool_input.wf_param_values + # test with param in workflow + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[0], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + assert "save_db" not in tool_input.wf_param_values + assert tool_input.wf_param_values == "workdb.sqlite" + + +def test_ToolInput_get_formatted_inputs(): + """Test :func:`planemo.training.tool_input.ToolInput.get_formatted_inputs`.""" + # test no input + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[1]["inputs"][0], + wf_param_values={}, + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + inputlist = tool_input.get_formatted_inputs() + assert inputlist == '' + # test collection + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[1]["inputs"][0], + wf_param_values=wf_param_values["add_to_database"], + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + inputlist = tool_input.get_formatted_inputs() + assert 'param-collection' in inputlist + assert '(Input dataset collection)' in inputlist + # test single input + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[2]["inputs"][0], + wf_param_values=wf_param_values["tables"][0], + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + inputlist = tool_input.get_formatted_inputs() + 
assert 'param-file' in inputlist + assert '(Input dataset)' in inputlist + + +def test_ToolInput_get_lower_param_desc(): + """Test :func:`planemo.training.tool_input.ToolInput.get_lower_param_desc`.""" + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[1], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + sub_param_desc = tool_input.get_lower_param_desc() + assert '> - {% icon param-collection %}' in sub_param_desc + + +def test_ToolInput_get_formatted_section_desc(): + """Test :func:`planemo.training.tool_input.ToolInput.get_formatted_section_desc`.""" + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[1], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + section_paramlist = tool_input.get_formatted_section_desc() + assert '> - In *"' in section_paramlist + assert '> - {%' in section_paramlist + + +def test_ToolInput_get_formatted_conditional_desc(): + """Test :func:`planemo.training.tool_input.ToolInput.get_formatted_conditional_desc`.""" + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[5], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + conditional_paramlist = tool_input.get_formatted_conditional_desc() + assert '> - *"' in conditional_paramlist + assert '"*: `Yes`' in conditional_paramlist + assert '> - *"' in conditional_paramlist + + +def test_ToolInput_get_formatted_repeat_desc(): + """Test :func:`planemo.training.tool_input.ToolInput.get_formatted_repeat_desc`.""" + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[2], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + repeat_desc = tool_input.get_formatted_repeat_desc() + assert '> - In *"' in repeat_desc + assert '> - Click on' in repeat_desc + assert '> - In *"1:' in repeat_desc + assert '> -' in repeat_desc + assert '> - 
In *"2:' in repeat_desc + + +def test_ToolInput_get_formatted_other_param_desc(): + """Test :func:`planemo.training.tool_input.ToolInput.get_formatted_other_param_desc`.""" + # test default value of the tool + tool_input = ToolInput( + tool_inp_desc={'value': 10, 'name': 't', 'type': ''}, + wf_param_values={'t': 10}, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + assert tool_input.get_formatted_other_param_desc() == '' + # test boolean parameter + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[3], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + assert tool_input.get_formatted_other_param_desc() == '' + tool_input.wf_param_values = 'true' + assert '*: `Yes`' in tool_input.get_formatted_other_param_desc() + # test select parameter + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[5]['cases'][0]['inputs'][0], + wf_param_values=wf_param_values['query_result'], + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + assert '*: `&`' in tool_input.get_formatted_other_param_desc() + # test other parameter + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[4], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=True) + assert '*: ``' in tool_input.get_formatted_other_param_desc() + + +def test_ToolInput_get_formatted_desc(): + """Test :func:`planemo.training.tool_input.ToolInput.get_formatted_desc`.""" + # test no param values + tool_input = ToolInput( + tool_inp_desc={'value': 10, 'name': 't', 'type': ''}, + wf_param_values={}, + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + assert tool_input.get_formatted_desc() == '' + # test data + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[2]["inputs"][0], + wf_param_values=wf_param_values["tables"][0], + wf_steps=wf_steps, + level=1, + should_be_there=False, + force_default=False) + inputlist = 
tool_input.get_formatted_inputs() + formatted_desc = tool_input.get_formatted_desc() + assert inputlist == formatted_desc + # test section + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[1], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + section_paramlist = tool_input.get_formatted_section_desc() + formatted_desc = tool_input.get_formatted_desc() + assert section_paramlist == formatted_desc + # test conditional + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[5], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + conditional_paramlist = tool_input.get_formatted_conditional_desc() + formatted_desc = tool_input.get_formatted_desc() + assert conditional_paramlist == formatted_desc + # test repeat + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[2], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + repeat_desc = tool_input.get_formatted_repeat_desc() + formatted_desc = tool_input.get_formatted_desc() + assert repeat_desc == formatted_desc + # test other + tool_input = ToolInput( + tool_inp_desc=tool_inp_desc[3], + wf_param_values=wf_param_values, + wf_steps=wf_steps, + level=1, + should_be_there=True, + force_default=False) + param_desc = tool_input.get_formatted_other_param_desc() + formatted_desc = tool_input.get_formatted_desc() + assert param_desc == formatted_desc diff --git a/tests/test_training_topic.py b/tests/test_training_topic.py new file mode 100644 index 000000000..72ad61b9c --- /dev/null +++ b/tests/test_training_topic.py @@ -0,0 +1,156 @@ +"""Training:topic functions.""" +import os +import shutil + +from planemo.training.topic import Topic +from planemo.training.utils import load_yaml +from .test_utils import TEST_DATA_DIR + + +def test_topic_init(): + """Test :func:`planemo.training.topic.Topic.init`.""" + # test requirement with 
default parameter + topic = Topic() + assert topic.name == "new_topic" + assert topic.type == "use" + assert topic.title == "The new topic" + assert topic.summary == "Summary" + assert topic.docker_image == "" + assert "maintainers" in topic.maintainers + assert topic.parent_dir == "topics" + assert topic.dir == "topics/new_topic" + assert topic.requirements[0].link == "/introduction/" + assert topic.references[0].link == "link" + # test requirement with non default + topic = Topic(name="topic2", target="admin", title="The 2nd topic", summary="", parent_dir="dir") + assert topic.name == "topic2" + assert topic.type == "admin" + assert topic.title == "The 2nd topic" + assert topic.summary == "" + assert topic.parent_dir == "dir" + assert topic.dir == "dir/topic2" + assert len(topic.requirements) == 0 + assert len(topic.references) == 0 + + +def test_topic_init_from_kwds(): + """Test :func:`planemo.training.topic.Topic.init_from_kwds`.""" + topic = Topic() + topic.init_from_kwds({ + 'topic_name': "topic", + 'topic_title': "New topic", + 'topic_target': "admin", + 'topic_summary': "Topic summary" + }) + assert topic.name == "topic" + assert topic.type == "admin" + assert topic.title == "New topic" + assert topic.summary == "Topic summary" + assert topic.dir == "topics/topic" + assert len(topic.requirements) == 0 + assert len(topic.references) == 0 + + +def test_topic_init_from_metadata(): + """Test :func:`planemo.training.topic.Topic.init_from_metadata`.""" + topic = Topic() + os.makedirs(topic.dir) + shutil.copy(os.path.join(TEST_DATA_DIR, 'training_metadata.yaml'), topic.metadata_fp) + topic.init_from_metadata() + assert topic.name == 'test' + assert topic.title == 'Test' + assert topic.summary == 'Summary' + assert topic.requirements[0].title == 'Galaxy introduction' + assert 'maintainer1' in topic.maintainers + shutil.rmtree(topic.parent_dir) + + +def test_topic_get_requirements(): + """Test :func:`planemo.training.topic.Topic.get_requirements`.""" + topic = 
Topic() + reqs = topic.get_requirements() + assert len(reqs) == 1 + assert 'title' in reqs[0] + + +def test_topic_get_references(): + """Test :func:`planemo.training.topic.Topic.get_references`.""" + topic = Topic() + refs = topic.get_references() + assert len(refs) == 1 + assert 'authors' in refs[0] + + +def test_topic_export_metadata_to_ordered_dict(): + """Test :func:`planemo.training.topic.Topic.export_metadata_to_ordered_dict`.""" + topic = Topic() + metadata = topic.export_metadata_to_ordered_dict() + assert 'name' in metadata + assert metadata['name'] == "new_topic" + assert 'type' in metadata + assert 'title' in metadata + assert 'summary' in metadata + assert 'requirements' in metadata + assert 'docker_image' in metadata + assert 'maintainers' in metadata + assert 'references' in metadata + + +def test_topic_set_paths(): + """Test :func:`planemo.training.topic.Topic.set_paths`.""" + new_name = 'the_new_name' + topic = Topic() + topic.name = new_name + topic.set_paths() + assert new_name in topic.dir + assert new_name in topic.img_folder + assert new_name in topic.tuto_folder + assert new_name in topic.index_fp + assert new_name in topic.readme_fp + assert new_name in topic.metadata_fp + assert new_name in topic.docker_folder + assert new_name in topic.dockerfile_fp + assert new_name in topic.slides_folder + + +def test_topic_exists(): + """Test :func:`planemo.training.topic.Topic.exists`.""" + topic = Topic() + assert not topic.exists() + os.makedirs(topic.dir) + assert topic.exists() + shutil.rmtree(topic.parent_dir) + + +def test_topic_create_topic_structure(): + """Test :func:`planemo.training.topic.Topic.create_topic_structure`.""" + topic = Topic() + topic.create_topic_structure() + topic_name = "new_topic" + topic_title = "The new topic" + # check the folder and its structure + assert topic.exists() + assert os.path.exists(topic.img_folder) + assert os.path.exists(topic.tuto_folder) + # create the index.md and the topic name + assert 
os.path.exists(topic.index_fp) + assert topic_name in open(topic.index_fp, 'r').read() + # create the README.md and the topic name + assert os.path.exists(topic.readme_fp) + assert topic_title in open(topic.readme_fp, 'r').read() + # check metadata content + assert os.path.exists(topic.metadata_fp) + metadata = load_yaml(topic.metadata_fp) + assert metadata['name'] == topic_name + # check dockerfile + assert os.path.exists(topic.dockerfile_fp) + assert topic_name in open(topic.dockerfile_fp, 'r').read() + assert topic_title in open(topic.dockerfile_fp, 'r').read() + # check introduction slide + assert os.path.exists(topic.intro_slide_fp) + assert topic_title in open(topic.intro_slide_fp, 'r').read() + # check in metadata directory + assert os.path.exists(os.path.join("metadata", "%s.yaml" % topic_name)) + # clean + shutil.rmtree(topic.parent_dir) + shutil.rmtree("metadata") diff --git a/tests/test_training_tutorial.py b/tests/test_training_tutorial.py new file mode 100644 index 000000000..81b0a2ba7 --- /dev/null +++ b/tests/test_training_tutorial.py @@ -0,0 +1,451 @@ +"""Training:tutorial functions.""" +import os +import shutil + +from nose.tools import assert_raises_regexp + + +from planemo.engine import ( + engine_context, + is_galaxy_engine, +) +from planemo.training import ( + Training +) +from planemo.training.topic import Topic +from planemo.training.tutorial import ( + format_wf_steps, + get_galaxy_datatype, + get_hands_on_boxes_from_local_galaxy, + get_hands_on_boxes_from_running_galaxy, + get_wf_inputs, + get_wf_param_values, + get_zenodo_record, + Tutorial +) +from planemo.training.utils import save_to_yaml +from .test_training import ( + create_existing_tutorial, + CTX, + datatype_fp, + KWDS, + RUNNABLE, + tuto_fp, + wf, + WF_FP, + wf_param_values, + zenodo_link +) +from .test_utils import ( + skip_if_environ, +) + +topic = Topic() +training = Training(KWDS) + + +def test_get_galaxy_datatype(): + """Test 
:func:`planemo.training.tutorial.get_galaxy_datatype`.""" + assert get_galaxy_datatype("csv", datatype_fp) == "csv" + assert get_galaxy_datatype("test", datatype_fp) == "strange_datatype" + assert "# Please add" in get_galaxy_datatype("unknown", datatype_fp) + + +def test_get_zenodo_record(): + """Test :func:`planemo.training.tutorial.get_zenodo_record`.""" + z_record, req_res = get_zenodo_record(zenodo_link) + file_link_prefix = "https://zenodo.org/api/files/51a1b5db-ff05-4cda-83d4-3b46682f921f" + assert z_record == "1321885" + assert 'files' in req_res + assert req_res['files'][0]['type'] in ['rdata', 'csv'] + assert file_link_prefix in req_res['files'][0]['links']['self'] + # check with wrong zenodo link + z_record, req_res = get_zenodo_record('https://zenodo.org/api/records/zenodooo') + assert z_record is None + assert 'files' in req_res + assert len(req_res['files']) == 0 + # using DOI + z_link = 'https://doi.org/10.5281/zenodo.1321885' + z_record, req_res = get_zenodo_record(z_link) + file_link_prefix = "https://zenodo.org/api/files/51a1b5db-ff05-4cda-83d4-3b46682f921f" + assert z_record == "1321885" + assert 'files' in req_res + assert req_res['files'][0]['type'] in ['rdata', 'csv'] + assert file_link_prefix in req_res['files'][0]['links']['self'] + + +def test_get_wf_inputs(): + """Test :func:`planemo.training.tutorial.get_wf_inputs`.""" + step_inp = { + 'tables_1|table': {'output_name': 'output', 'id': 2}, + 'add_to_database|withdb': {'output_name': 'output', 'id': 0}, + 'tables_0|table': {'output_name': 'output', 'id': 1}, + 'add_to_database|tab_0|tt': {'output_name': 'output', 'id': 0}, + 'tables_2|section|sect': {'output_name': 'output', 'id': 1}, + 'tables_3|tables_0|sect': {'output_name': 'output', 'id': 1} + } + step_inputs = get_wf_inputs(step_inp) + assert 'tables' in step_inputs + assert '0' in step_inputs['tables'] + assert 'table' in step_inputs['tables']['0'] + assert '2' in step_inputs['tables'] + assert 'section' in step_inputs['tables']['2'] 
+ assert 'sect' in step_inputs['tables']['2']['section'] + assert 'output_name' in step_inputs['tables']['2']['section']['sect'] + assert 'add_to_database' in step_inputs + assert 'withdb' in step_inputs['add_to_database'] + assert 'tab' in step_inputs['add_to_database'] + assert '0' in step_inputs['add_to_database']['tab'] + assert 'tt' in step_inputs['add_to_database']['tab']['0'] + + +def test_get_wf_param_values(): + """Test :func:`planemo.training.tutorial.get_wf_param_values`.""" + wf_step = wf['steps']['4'] + wf_param_value_tests = get_wf_param_values(wf_step['tool_state'], get_wf_inputs(wf_step['input_connections'])) + assert isinstance(wf_param_value_tests, dict) + for k in wf_param_values: + assert k in wf_param_value_tests + + +@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS") +def test_format_wf_steps(): + """Test :func:`planemo.training.tutorial.format_wf_steps`.""" + assert is_galaxy_engine(**KWDS) + with engine_context(CTX, **KWDS) as galaxy_engine: + with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config: + workflow_id = config.workflow_id(WF_FP) + wf = config.gi.workflows.export_workflow_dict(workflow_id) + body = format_wf_steps(wf, config.gi) + assert '## Sub-step with **FastQC**' in body + assert '## Sub-step with **Query Tabular**' in body + assert '## Sub-step with **Select first**' in body + + +@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS") +def test_get_hands_on_boxes_from_local_galaxy(): + """Test :func:`planemo.training.tutorial.get_hands_on_boxes_from_local_galaxy`.""" + tuto_body = get_hands_on_boxes_from_local_galaxy(KWDS, WF_FP, CTX) + assert '## Sub-step with **FastQC**' in tuto_body + assert '## Sub-step with **Query Tabular**' in tuto_body + assert '## Sub-step with **Select first**' in tuto_body + + +@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS") +def test_get_hands_on_boxes_from_running_galaxy(): + """Test :func:`planemo.training.tutorial.get_hands_on_boxes_from_running_galaxy`.""" + assert is_galaxy_engine(**KWDS) + 
galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port']) + with engine_context(CTX, **KWDS) as galaxy_engine: + with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config: + wf_id = config.workflow_id(WF_FP) + tuto_body = get_hands_on_boxes_from_running_galaxy(wf_id, galaxy_url, config.user_api_key) + assert '## Sub-step with **FastQC**' in tuto_body + assert '## Sub-step with **Query Tabular**' in tuto_body + assert '## Sub-step with **Select first**' in tuto_body + + +def test_tutorial_init(): + """Test :func:`planemo.training.tutorial.tutorial.init`.""" + # with default parameter + tuto = Tutorial( + training=training, + topic=topic) + assert tuto.name == "new_tuto" + assert tuto.title == "The new tutorial" + assert tuto.zenodo_link == "" + assert tuto.hands_on + assert not tuto.slides + assert tuto.init_wf_id is None + assert tuto.init_wf_fp is None + assert tuto.datatype_fp == '' + assert "new_tuto" in tuto.dir + assert '## Sub-step with **My Tool**' in tuto.body + assert tuto.data_lib + # with non default parameter + tuto = Tutorial( + training=training, + topic=topic, + name="my_tuto", + title="The tutorial", + zenodo_link="URL") + assert tuto.name == "my_tuto" + assert tuto.title == "The tutorial" + assert tuto.zenodo_link == "URL" + assert "my_tuto" in tuto.dir + + +def test_tutorial_init_from_kwds(): + """Test :func:`planemo.training.tutorial.tutorial.init_from_kwds`.""" + kwds = { + 'tutorial_name': "my_tuto", + 'tutorial_title': "Title of tuto", + 'hands_on': True, + 'slides': True, + 'workflow': WF_FP, + 'workflow_id': 'id', + 'zenodo_link': None, + 'datatypes': datatype_fp + } + tuto = Tutorial( + training=training, + topic=topic) + tuto.init_from_kwds(kwds) + assert tuto.name == "my_tuto" + assert tuto.title == "Title of tuto" + assert tuto.zenodo_link == '' + assert "Which biological questions are addressed by the tutorial?" 
in tuto.questions + assert tuto.hands_on + assert tuto.slides + assert tuto.init_wf_id == 'id' + assert tuto.init_wf_fp == WF_FP + assert tuto.datatype_fp == datatype_fp + assert "my_tuto" in tuto.dir + + +def test_tutorial_init_from_existing_tutorial(): + """Test :func:`planemo.training.tutorial.tutorial.init_from_existing_tutorial`.""" + tuto = Tutorial( + training=training, + topic=topic) + # non existing tutorial + exp_exception = "The tutorial existing_tutorial does not exists. It should be created" + with assert_raises_regexp(Exception, exp_exception): + tuto.init_from_existing_tutorial('existing_tutorial') + # existing tutorial + create_existing_tutorial('existing_tutorial', tuto_fp, tuto.topic) + tuto.init_from_existing_tutorial('existing_tutorial') + assert tuto.title == 'A tutorial to test' + assert "A learning objective" in tuto.objectives + assert tuto.time_estimation == "1H" + assert 'the_best_contributor' in tuto.contributors + assert '# First section' in tuto.body + shutil.rmtree("topics") + + +def test_tutorial_init_data_lib(): + """Test :func:`planemo.training.tutorial.tutorial.init_data_lib`.""" + tuto = Tutorial( + training=training, + topic=topic) + tuto.init_data_lib() + assert tuto.data_lib['destination']['type'] == 'library' + assert tuto.data_lib['items'][0]['name'] == topic.title + assert tuto.data_lib['items'][0]['items'][0]['name'] == tuto.title + # from existing data library file + os.makedirs(tuto.dir) + tuto.data_lib = {} + tuto.init_data_lib() + assert tuto.data_lib['items'][0]['name'] == topic.title + assert tuto.data_lib['items'][0]['items'][0]['name'] == tuto.title + # other tutorial already there and add the new one + tuto.data_lib['items'][0]['items'][0]['name'] = 'Different tutorial' + save_to_yaml(tuto.data_lib, tuto.data_lib_fp) + tuto.init_data_lib() + assert tuto.data_lib['items'][0]['items'][0]['name'] == 'Different tutorial' + assert tuto.data_lib['items'][0]['items'][1]['name'] == tuto.title + shutil.rmtree("topics") + + 
+def test_tutorial_get_tuto_metata():
+    """Test :func:`planemo.training.tutorial.tutorial.get_tuto_metata`."""
+    tuto = Tutorial(
+        training=training,
+        topic=topic)
+    tuto.questions = ['q1', 'q2']
+    metadata = tuto.get_tuto_metata()
+    assert 'title: The new tutorial' in metadata
+    assert '- q1' in metadata
+
+
+def test_tutorial_set_dir_name():
+    """Test :func:`planemo.training.tutorial.tutorial.set_dir_name`."""
+    tuto = Tutorial(
+        training=training,
+        topic=topic)
+    tuto.name = "the_tuto"
+    tuto.set_dir_name()
+    assert tuto.name in tuto.dir
+    assert tuto.name in tuto.tuto_fp
+    assert tuto.name in tuto.slide_fp
+    assert tuto.name in tuto.data_lib_fp
+    assert tuto.name in tuto.wf_dir
+    assert tuto.name in tuto.wf_fp
+
+
+def test_tutorial_exists():
+    """Test :func:`planemo.training.tutorial.tutorial.exists`."""
+    # default
+    tuto = Tutorial(
+        training=training,
+        topic=topic)
+    assert not tuto.exists()
+    # after dir creation
+    os.makedirs(tuto.dir)
+    assert tuto.exists()
+    shutil.rmtree("topics")
+
+
+def test_tutorial_has_workflow():
+    """Test :func:`planemo.training.tutorial.tutorial.has_workflow`."""
+    # default
+    tuto = Tutorial(
+        training=training,
+        topic=topic)
+    assert not tuto.has_workflow()
+    # with wf filepath
+    tuto.init_wf_fp = WF_FP
+    assert tuto.has_workflow()
+    # with no wf filepath nor wf id
+    tuto.init_wf_fp = None
+    tuto.init_wf_id = ''
+    assert not tuto.has_workflow()
+    # with wf id
+    tuto.init_wf_id = 'ID'
+    assert tuto.has_workflow()
+
+
+@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS")
+def test_tutorial_export_workflow_file():
+    """Test :func:`planemo.training.tutorial.tutorial.export_workflow_file`."""
+    tuto = Tutorial(
+        training=training,
+        topic=topic)
+    os.makedirs(tuto.wf_dir)
+    # with workflow fp
+    tuto.init_wf_fp = WF_FP
+    tuto.export_workflow_file()
+    assert os.path.exists(tuto.wf_fp)
+    # with workflow id
+    tuto.init_wf_fp = None
+    os.remove(tuto.wf_fp)
+    assert is_galaxy_engine(**KWDS)
+    galaxy_url = 'http://%s:%s' % (KWDS['host'],
KWDS['port']) + with engine_context(CTX, **KWDS) as galaxy_engine: + with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config: + tuto.init_wf_id = config.workflow_id(WF_FP) + tuto.training.galaxy_url = galaxy_url + tuto.training.galaxy_api_key = config.user_api_key + tuto.export_workflow_file() + assert os.path.exists(tuto.wf_fp) + shutil.rmtree("topics") + + +def test_tutorial_get_files_from_zenodo(): + """Test :func:`planemo.training.tutorial.tutorial.get_files_from_zenodo`.""" + tuto = Tutorial( + training=training, + topic=topic, + zenodo_link=zenodo_link) + tuto.datatype_fp = datatype_fp + files, z_record = tuto.get_files_from_zenodo() + assert z_record == "1321885" + # test links + file_link_prefix = "https://zenodo.org/api/files/51a1b5db-ff05-4cda-83d4-3b46682f921f" + assert file_link_prefix in tuto.zenodo_file_links[0] + # test files dict + assert file_link_prefix in files[0]['url'] + assert files[0]['src'] == 'url' + assert files[0]['info'] == zenodo_link + assert "# Please add" in files[0]['ext'] + assert files[1]['ext'] == 'csv' + + +def test_tutorial_prepare_data_library_from_zenodo(): + """Test :func:`planemo.training.tutorial.tutorial.prepare_data_library_from_zenodo`.""" + # without zenodo link + tuto = Tutorial( + training=training, + topic=topic) + tuto.datatype_fp = datatype_fp + os.makedirs(tuto.wf_dir) + tuto.prepare_data_library_from_zenodo() + assert os.path.exists(tuto.data_lib_fp) + assert 'DOI' not in open(tuto.data_lib_fp, 'r').read() + # with zenodo link + tuto.zenodo_link = zenodo_link + tuto.prepare_data_library_from_zenodo() + assert "DOI: 10.5281/zenodo" in open(tuto.data_lib_fp, 'r').read() + shutil.rmtree("topics") + + +def test_tutorial_write_hands_on_tutorial(): + """Test :func:`planemo.training.tutorial.tutorial.write_hands_on_tutorial`.""" + tuto = Tutorial( + training=training, + topic=topic) + os.makedirs(tuto.wf_dir) + tuto.zenodo_file_links = ["URL1", "URL2"] + tuto.write_hands_on_tutorial() + assert 
os.path.exists(tuto.tuto_fp) + with open(tuto.tuto_fp, 'r') as tuto_f: + tuto_c = tuto_f.read() + assert 'layout: tutorial_hands_on' in tuto_c + assert '# Introduction' in tuto_c + assert 'URL1' in tuto_c + assert '# Conclusion' in tuto_c + shutil.rmtree("topics") + + +@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS") +def test_tutorial_create_hands_on_tutorial(): + """Test :func:`planemo.training.tutorial.tutorial.create_hands_on_tutorial`.""" + tuto = Tutorial( + training=training, + topic=topic) + os.makedirs(tuto.wf_dir) + # with init_wf_id and no Galaxy URL + tuto.init_wf_id = 'ID' + tuto.training.galaxy_url = None + exp_exception = "No Galaxy URL given" + with assert_raises_regexp(Exception, exp_exception): + tuto.create_hands_on_tutorial(CTX) + # with init_wf_id and no Galaxy API key + tuto.init_wf_id = 'ID' + tuto.training.galaxy_url = 'http://%s:%s' % (KWDS['host'], KWDS['port']) + tuto.training.galaxy_api_key = None + exp_exception = "No API key to access the given Galaxy instance" + with assert_raises_regexp(Exception, exp_exception): + tuto.create_hands_on_tutorial(CTX) + # with init_wf_id + assert is_galaxy_engine(**KWDS) + with engine_context(CTX, **KWDS) as galaxy_engine: + with galaxy_engine.ensure_runnables_served([RUNNABLE]) as config: + tuto.init_wf_id = config.workflow_id(WF_FP) + tuto.training.galaxy_api_key = config.user_api_key + tuto.create_hands_on_tutorial(CTX) + assert os.path.exists(tuto.tuto_fp) + os.remove(tuto.tuto_fp) + # with init_wf_fp + tuto.init_wf_id = None + tuto.init_wf_fp = WF_FP + tuto.create_hands_on_tutorial(CTX) + assert os.path.exists(tuto.tuto_fp) + shutil.rmtree("topics") + + +@skip_if_environ("PLANEMO_SKIP_GALAXY_TESTS") +def test_tutorial_create_tutorial(): + """Test :func:`planemo.training.tutorial.tutorial.create_tutorial`.""" + tuto = Tutorial( + training=training, + topic=topic) + tuto.init_from_kwds({ + 'tutorial_name': "my_tuto", + 'tutorial_title': "Title of tuto", + 'hands_on': True, + 'slides': True, + 
'workflow': WF_FP, + 'workflow_id': None, + 'zenodo_link': zenodo_link, + 'datatypes': datatype_fp + }) + tuto.create_tutorial(CTX) + assert os.path.exists(tuto.dir) + assert os.path.exists(tuto.tour_dir) + assert os.path.exists(tuto.wf_dir) + assert os.path.exists(tuto.data_lib_fp) + assert os.path.exists(tuto.tuto_fp) + assert os.path.exists(tuto.slide_fp) + assert 'layout: tutorial_slides' in open(tuto.slide_fp, 'r').read() + shutil.rmtree("topics") diff --git a/tests/test_training_utils.py b/tests/test_training_utils.py new file mode 100644 index 000000000..d60df8a09 --- /dev/null +++ b/tests/test_training_utils.py @@ -0,0 +1,109 @@ +"""Training:utils functions.""" +import os + +from planemo.training.utils import ( + load_yaml, + Reference, + Requirement, + save_to_yaml +) +from .test_utils import TEST_DATA_DIR + +metadata_fp = os.path.join(TEST_DATA_DIR, "training_metadata.yaml") + + +def test_load_yaml(): + """Test :func:`planemo.training.utils.load_yaml`.""" + metadata = load_yaml(metadata_fp) + # test if name there + assert metadata["name"] == "test" + # test if order of material is conserved + assert metadata['maintainers'][0] == 'maintainer1' + + +def test_save_to_yaml(): + """Test :func:`planemo.training.utils.save_to_yaml`.""" + metadata = load_yaml(metadata_fp) + new_metadata_fp = "metadata.yaml" + save_to_yaml(metadata, new_metadata_fp) + assert os.path.exists(new_metadata_fp) + os.remove(new_metadata_fp) + + +def test_requirement_init(): + """Test :func:`planemo.training.utils.Requirement.init`.""" + # test requirement with default parameter + req = Requirement() + assert req.title == "" + assert req.type == "internal" + assert req.link == "/introduction/" + # test requirement with non default + req = Requirement(title="Introduction", req_type="external", link="URL") + assert req.title == "Introduction" + assert req.type == "external" + assert req.link == "URL" + + +def test_requirement_init_from_dict(): + """Test 
:func:`planemo.training.utils.Requirement.init_from_dict`.""" + req = Requirement() + req.init_from_dict({ + 'title': 'The Requirement', + 'type': 'external', + 'link': "http://URL" + }) + assert req.title == 'The Requirement' + assert req.type == 'external' + assert req.link == "http://URL" + + +def test_requirement_export_to_ordered_dict(): + """Test :func:`planemo.training.utils.Requirement.export_to_ordered_dict`.""" + req = Requirement() + exp_req = req.export_to_ordered_dict() + assert 'title' in exp_req + assert exp_req['title'] == "" + assert 'type' in exp_req + assert 'link' in exp_req + + +def test_reference_init(): + """Test :func:`planemo.training.utils.Reference.init`.""" + # test requirement with default parameter + ref = Reference() + assert ref.authors == "authors et al" + assert ref.title == "the title" + assert ref.link == "link" + assert ref.summary == "Why this reference is useful" + # test requirement with non default + ref = Reference(authors="the authors", title="a title", link="URL", summary="The summary") + assert ref.authors == "the authors" + assert ref.title == "a title" + assert ref.link == "URL" + assert ref.summary == "The summary" + + +def test_reference_init_from_dict(): + """Test :func:`planemo.training.utils.Reference.init_from_dict`.""" + ref = Reference() + ref.init_from_dict({ + 'authors': 'my author et al', + 'title': 'The Reference to read', + 'link': "http://URL", + 'summary': 'why we should read it' + }) + assert ref.authors == 'my author et al' + assert ref.title == 'The Reference to read' + assert ref.link == "http://URL" + assert ref.summary == "why we should read it" + + +def test_reference_export_to_ordered_dict(): + """Test :func:`planemo.training.utils.Reference.export_to_ordered_dict`.""" + ref = Reference() + exp_ref = ref.export_to_ordered_dict() + assert 'authors' in exp_ref + assert 'title' in exp_ref + assert exp_ref['title'] == "the title" + assert 'link' in exp_ref + assert 'summary' in exp_ref