diff --git a/client/src/components/Workflow/Editor/Attributes.test.js b/client/src/components/Workflow/Editor/Attributes.test.js
index 13dc05ce7eb0..d4f15b07be9b 100644
--- a/client/src/components/Workflow/Editor/Attributes.test.js
+++ b/client/src/components/Workflow/Editor/Attributes.test.js
@@ -1,5 +1,6 @@
import { mount, createLocalVue } from "@vue/test-utils";
import Attributes from "./Attributes";
+import { LegacyParameters } from "./modules/utilities";
jest.mock("app");
@@ -9,12 +10,15 @@ const TEST_NAME = "workflow_name";
describe("Attributes", () => {
it("test attributes", async () => {
const localVue = createLocalVue();
+ const legacyParameters = new LegacyParameters();
+ legacyParameters.getParameter("workflow_parameter_0");
+ legacyParameters.getParameter("workflow_parameter_1");
const wrapper = mount(Attributes, {
propsData: {
id: "workflow_id",
name: TEST_NAME,
tags: ["workflow_tag_0", "workflow_tag_1"],
- parameters: ["workflow_parameter_0", "workflow_parameter_1"],
+ parameters: legacyParameters,
versions: ["workflow_version_0"],
annotation: TEST_ANNOTATION,
},
diff --git a/client/src/components/Workflow/Editor/Attributes.vue b/client/src/components/Workflow/Editor/Attributes.vue
index 53e3b4be7425..e2ef3e7dd933 100644
--- a/client/src/components/Workflow/Editor/Attributes.vue
+++ b/client/src/components/Workflow/Editor/Attributes.vue
@@ -19,8 +19,8 @@
Parameters
- {{ key + 1 }}: {{ p }}
+ {{ key + 1 }}: {{ p.name }}
@@ -59,6 +59,7 @@ import Vue from "vue";
import BootstrapVue from "bootstrap-vue";
import moment from "moment";
import { Services } from "components/Workflow/services";
+import { LegacyParameters } from "components/Workflow/Editor/modules/utilities";
import Tags from "components/Common/Tags";
import LicenseSelector from "components/License/LicenseSelector";
import CreatorEditor from "components/SchemaOrg/CreatorEditor";
@@ -104,7 +105,7 @@ export default {
default: null,
},
parameters: {
- type: Array,
+ type: LegacyParameters,
default: null,
},
},
@@ -132,7 +133,7 @@ export default {
return creator;
},
hasParameters() {
- return this.parameters.length > 0;
+ return this.parameters && this.parameters.parameters.length > 0;
},
versionOptions() {
const versions = [];
diff --git a/client/src/components/Workflow/Editor/Index.vue b/client/src/components/Workflow/Editor/Index.vue
index 837c3bbb88d5..801a5b99cd18 100644
--- a/client/src/components/Workflow/Editor/Index.vue
+++ b/client/src/components/Workflow/Editor/Index.vue
@@ -93,6 +93,7 @@
@onLayout="onLayout"
@onEdit="onEdit"
@onAttributes="onAttributes"
+ @onLint="onLint"
/>
@@ -115,6 +116,19 @@
@onLicense="onLicense"
@onCreator="onCreator"
/>
+
@@ -127,14 +141,15 @@
+
+
diff --git a/client/src/components/Workflow/Editor/LintSection.vue b/client/src/components/Workflow/Editor/LintSection.vue
new file mode 100644
index 000000000000..004df07a7417
--- /dev/null
+++ b/client/src/components/Workflow/Editor/LintSection.vue
@@ -0,0 +1,82 @@
+
+
+
+
+
+ {{ title }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/client/src/components/Workflow/Editor/Node.vue b/client/src/components/Workflow/Editor/Node.vue
index a87c52190e69..e767c31d8436 100644
--- a/client/src/components/Workflow/Editor/Node.vue
+++ b/client/src/components/Workflow/Editor/Node.vue
@@ -225,6 +225,9 @@ export default {
isEnabled() {
return getGalaxyInstance().config.enable_tool_recommendations;
},
+ isInput() {
+ return this.type == "data_input" || this.type == "data_collection_input" || this.type == "parameter_input";
+ },
},
methods: {
onChange() {
diff --git a/client/src/components/Workflow/Editor/Options.vue b/client/src/components/Workflow/Editor/Options.vue
index b032ee71dc9c..074721e79051 100644
--- a/client/src/components/Workflow/Editor/Options.vue
+++ b/client/src/components/Workflow/Editor/Options.vue
@@ -54,6 +54,9 @@
Save As...
+ Check for Best Practices
Auto Layout
diff --git a/client/src/components/Workflow/Editor/modules/model.js b/client/src/components/Workflow/Editor/modules/model.js
index 15c9c8e677fe..789309ab5ba8 100644
--- a/client/src/components/Workflow/Editor/modules/model.js
+++ b/client/src/components/Workflow/Editor/modules/model.js
@@ -31,6 +31,13 @@ export function fromSimple(workflow, data, appendData = false) {
});
Vue.nextTick(() => {
// Second pass, connections
+ let using_workflow_outputs = false;
+ Object.entries(data.steps).forEach(([id, step]) => {
+ if (step.workflow_outputs && step.workflow_outputs.length > 0) {
+ using_workflow_outputs = true;
+ }
+ });
+
Object.entries(data.steps).forEach(([id, step]) => {
const nodeIndex = parseInt(id) + offset;
const node = workflow.nodes[nodeIndex];
@@ -49,12 +56,14 @@ export function fromSimple(workflow, data, appendData = false) {
}
});
- // Older workflows contain HideDatasetActions only, but no active outputs yet.
- Object.values(node.outputs).forEach((ot) => {
- if (!node.postJobActions[`HideDatasetAction${ot.name}`]) {
- node.activeOutputs.add(ot.name);
- }
- });
+ if (!using_workflow_outputs) {
+ // Older workflows contain HideDatasetActions only, but no active outputs yet.
+ Object.values(node.outputs).forEach((ot) => {
+ if (!node.postJobActions[`HideDatasetAction${ot.name}`]) {
+ node.activeOutputs.add(ot.name);
+ }
+ });
+ }
});
});
});
diff --git a/client/src/components/Workflow/Editor/modules/services.js b/client/src/components/Workflow/Editor/modules/services.js
index ff979e96e734..8a74c0be5c33 100644
--- a/client/src/components/Workflow/Editor/modules/services.js
+++ b/client/src/components/Workflow/Editor/modules/services.js
@@ -22,6 +22,23 @@ export async function getModule(request_data) {
}
}
+export async function refactor(workflow, id, actions, dryRun = false) {
+ try {
+ const requestData = {
+ actions: actions,
+ style: "editor",
+ dry_run: dryRun,
+ };
+ const { data } = await axios.put(`${getAppRoot()}api/workflows/${id}/refactor`, requestData);
+ if (!dryRun) {
+ fromSimple(workflow, data);
+ }
+ return data;
+ } catch (e) {
+ rethrowSimple(e);
+ }
+}
+
export async function loadWorkflow(workflow, id, version, appendData) {
try {
const versionQuery = Number.isInteger(version) ? `version=${version}` : "";
diff --git a/client/src/components/Workflow/Editor/modules/utilities.js b/client/src/components/Workflow/Editor/modules/utilities.js
index 582b5cd9f605..382823a32f66 100644
--- a/client/src/components/Workflow/Editor/modules/utilities.js
+++ b/client/src/components/Workflow/Editor/modules/utilities.js
@@ -70,6 +70,11 @@ export function showAttributes() {
$("#edit-attributes").show();
}
+export function showLint() {
+ $(".right-content").hide();
+ $("#lint-panel").show();
+}
+
export function showForm(workflow, node, datatypes) {
if (node && node.config_form && Object.keys(node.config_form).length > 0) {
const cls = "right-content";
@@ -127,17 +132,76 @@ export function showUpgradeMessage(data) {
return hasToolUpgrade;
}
-export function getWorkflowParameters(nodes) {
+class LegacyParameterReference {
+ constructor(parameter, node) {
+ //this.node = node;
+ parameter.references.push(this);
+ this.nodeId = node.id;
+ }
+}
+
+class ToolInputLegacyParameterReference extends LegacyParameterReference {
+ constructor(parameter, node, toolInput) {
+ super(parameter, node);
+ this.toolInput = toolInput;
+ }
+}
+
+class PjaLegacyParameterReference extends LegacyParameterReference {
+ constructor(parameter, node, pja) {
+ super(parameter, node);
+ this.pja = pja;
+ }
+}
+
+class LegacyParameter {
+ constructor(name) {
+ this.name = name;
+ this.references = [];
+ }
+
+ canExtract() {
+ // The backend will indicate errors, but it would be better to pre-check for them
+ // in a future iteration.
+ // return false if mixed input types or if say integers are used in PJA?
+ return true;
+ }
+}
+
+export class LegacyParameters {
+ constructor() {
+ this.parameters = [];
+ }
+
+ getParameter(name) {
+ for (const parameter of this.parameters) {
+ if (parameter.name == name) {
+ return parameter;
+ }
+ }
+ const legacyParameter = new LegacyParameter(name);
+ this.parameters.push(legacyParameter);
+ return legacyParameter;
+ }
+
+ getParameterFromMatch(match) {
+ return this.getParameter(match.substring(2, match.length - 1));
+ }
+}
+
+export function getLegacyWorkflowParameters(nodes) {
+ const legacyParameters = new LegacyParameters();
const parameter_re = /\$\{.+?\}/g;
- const parameters = [];
- let matches = [];
Object.entries(nodes).forEach(([k, node]) => {
if (node.config_form && node.config_form.inputs) {
Utils.deepeach(node.config_form.inputs, (d) => {
if (typeof d.value == "string") {
var form_matches = d.value.match(parameter_re);
if (form_matches) {
- matches = matches.concat(form_matches);
+ for (const match of form_matches) {
+ const legacyParameter = legacyParameters.getParameterFromMatch(match);
+ new ToolInputLegacyParameterReference(legacyParameter, node, d);
+ }
}
}
});
@@ -149,25 +213,89 @@ export function getWorkflowParameters(nodes) {
if (typeof action_argument === "string") {
const arg_matches = action_argument.match(parameter_re);
if (arg_matches) {
- matches = matches.concat(arg_matches);
+ for (const match of arg_matches) {
+ const legacyParameter = legacyParameters.getParameterFromMatch(match);
+ new PjaLegacyParameterReference(legacyParameter, node, pja);
+ }
}
}
});
}
});
}
- if (matches) {
- Object.entries(matches).forEach(([k, element]) => {
- if (parameters.indexOf(element) === -1) {
- parameters.push(element);
- }
- });
+ });
+ return legacyParameters;
+}
+
+export function getDisconnectedInputs(nodes) {
+ const inputs = [];
+ Object.entries(nodes).forEach(([k, node]) => {
+ Object.entries(node.inputTerminals).forEach(([inputName, inputTerminal]) => {
+ if (inputTerminal.connectors && inputTerminal.connectors.length > 0) {
+ return;
+ }
+ if (inputTerminal.optional) {
+ return;
+ }
+ const input = {
+ inputName: inputName,
+ stepId: node.id,
+ stepLabel: node.title, // label but falls back to tool title...
+ stepIconClass: node.iconClass,
+ inputLabel: inputTerminal.attributes.input.label,
+ canExtract: !inputTerminal.multiple,
+ };
+ inputs.push(input);
+ });
+ });
+ return inputs;
+}
+
+export function getInputsMissingMetadata(nodes) {
+ const inputs = [];
+ Object.entries(nodes).forEach(([k, node]) => {
+ if (!node.isInput) {
+ return;
+ }
+ const annotation = node.annotation;
+ const label = node.label;
+ const missingLabel = !label;
+ const missingAnnotation = !annotation;
+
+ if (missingLabel || missingAnnotation) {
+ const input = {
+ stepLabel: node.title,
+ stepIconClass: node.iconClass,
+ missingAnnotation: !annotation,
+ missingLabel: !label,
+ };
+ inputs.push(input);
}
});
- Object.entries(parameters).forEach(([k, element]) => {
- parameters[k] = element.substring(2, element.length - 1);
+ return inputs;
+}
+
+export function getWorkflowOutputs(nodes) {
+ const outputs = [];
+ Object.entries(nodes).forEach(([k, node]) => {
+ if (node.isInput) {
+ // For now skip these... maybe should push this logic into linting though
+ // since it is fine to have outputs on inputs.
+ return;
+ }
+ const activeOutputs = node.activeOutputs;
+ for (const outputDef of activeOutputs.getAll()) {
+ const output = {
+ outputName: outputDef.output_name,
+ outputLabel: outputDef.label,
+ stepId: node.id,
+ stepLabel: node.title, // label but falls back to tool title...
+ stepIconClass: node.iconClass,
+ };
+ outputs.push(output);
+ }
});
- return parameters;
+ return outputs;
}
export function saveAs(workflow) {
diff --git a/client/src/components/Workflow/Run/model.js b/client/src/components/Workflow/Run/model.js
index 753efd272e34..a35ba4eeccb9 100644
--- a/client/src/components/Workflow/Run/model.js
+++ b/client/src/components/Workflow/Run/model.js
@@ -20,6 +20,8 @@ export class WorkflowRunModel {
this.links = [];
this.parms = [];
this.wpInputs = {};
+ this.parameterInputLabels = [];
+
let hasOpenToolSteps = false;
let hasReplacementParametersInToolForm = false;
@@ -63,6 +65,10 @@ export class WorkflowRunModel {
this.steps[i] = step;
this.links[i] = [];
this.parms[i] = {};
+
+ if (step.step_type == "parameter_input" && step.step_label) {
+ this.parameterInputLabels.push(step.step_label);
+ }
});
// build linear index of step input pairs
@@ -141,7 +147,9 @@ export class WorkflowRunModel {
});
});
_.each(step.replacement_parameters, (wp_name) => {
- _ensureWorkflowParameter(wp_name);
+ if (this.parameterInputLabels.indexOf(wp_name) == -1) {
+ _ensureWorkflowParameter(wp_name);
+ }
});
});
diff --git a/lib/galaxy/app.py b/lib/galaxy/app.py
index c1dc83ac0f5c..5197464967f4 100644
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -20,7 +20,10 @@
from galaxy.managers.roles import RoleManager
from galaxy.managers.tools import DynamicToolManager
from galaxy.managers.users import UserManager
-from galaxy.managers.workflows import WorkflowsManager
+from galaxy.managers.workflows import (
+ WorkflowContentsManager,
+ WorkflowsManager,
+)
from galaxy.model.database_heartbeat import DatabaseHeartbeat
from galaxy.model.tags import GalaxyTagHandler
from galaxy.queue_worker import (
@@ -105,6 +108,7 @@ def __init__(self, **kwargs):
self.history_manager = HistoryManager(self)
self.hda_manager = HDAManager(self)
self.workflow_manager = WorkflowsManager(self)
+ self.workflow_contents_manager = WorkflowContentsManager(self)
self.dependency_resolvers_view = DependencyResolversView(self)
self.test_data_resolver = test_data.TestDataResolver(file_dirs=self.config.tool_test_data_directories)
self.library_folder_manager = FolderManager()
diff --git a/lib/galaxy/managers/workflows.py b/lib/galaxy/managers/workflows.py
index a309e603f3dd..b1da73f586ab 100644
--- a/lib/galaxy/managers/workflows.py
+++ b/lib/galaxy/managers/workflows.py
@@ -39,6 +39,7 @@
ToolModule,
WorkflowModuleInjector
)
+from galaxy.workflow.refactor.execute import WorkflowRefactorExecutor
from galaxy.workflow.reports import generate_report
from galaxy.workflow.resources import get_resource_mapper_function
from galaxy.workflow.steps import attach_ordered_steps
@@ -305,7 +306,7 @@ def normalize_workflow_format(self, trans, as_dict):
Currently this mostly means converting format 2 workflows into standard Galaxy
workflow JSON for consumption for the rest of this module. In the future we will
- want to be a lot more percise about this - preserve the original description along
+ want to be a lot more precise about this - preserve the original description along
side the data model and apply updates in a way that largely preserves YAML structure
so workflows can be extracted.
"""
@@ -421,6 +422,16 @@ def update_workflow_from_raw_description(self, trans, stored_workflow, raw_workf
# Connect up
workflow.stored_workflow = stored_workflow
stored_workflow.latest_workflow = workflow
+
+ # I'm not sure we can't just default this to True.
+ if kwds.get("update_stored_workflow_attributes", False):
+ update_dict = raw_workflow_description.as_dict
+ if 'name' in update_dict:
+ stored_workflow.name = update_dict['name']
+ if 'annotation' in update_dict:
+ newAnnotation = sanitize_html(update_dict['annotation'])
+ self.add_item_annotation(trans.sa_session, stored_workflow.user, stored_workflow, newAnnotation)
+
# Persist
trans.sa_session.flush()
if stored_workflow.from_path:
@@ -447,6 +458,12 @@ def _workflow_from_raw_description(self, trans, raw_workflow_description, name,
workflow.license = data.get('license')
workflow.creator_metadata = data.get('creator')
+ if 'license' in data:
+ workflow.license = data['license']
+
+ if 'creator' in data:
+ workflow.creator_metadata = data['creator']
+
# Assume no errors until we find a step that has some
workflow.has_errors = False
# Create each step
@@ -528,7 +545,7 @@ def to_format_2(wf_dict, **kwds):
elif style == "ga":
wf_dict = self._workflow_to_dict_export(trans, stored, workflow=workflow)
else:
- raise exceptions.RequestParameterInvalidException('Unknown workflow style [%s]' % style)
+ raise exceptions.RequestParameterInvalidException(f'Unknown workflow style {style}')
if version is not None:
wf_dict['version'] = version
else:
@@ -1264,8 +1281,7 @@ def __module_from_dict(self, trans, steps, steps_by_external_id, step_dict, **kw
representing type-specific functionality from the incoming dictionary.
"""
step = model.WorkflowStep()
- # TODO: Consider handling position inside module.
- step.position = step_dict['position']
+ step.position = step_dict.get('position', model.WorkflowStep.DEFAULT_POSITION)
if step_dict.get("uuid", None) and step_dict['uuid'] != "None":
step.uuid = step_dict["uuid"]
if "label" in step_dict:
@@ -1275,13 +1291,13 @@ def __module_from_dict(self, trans, steps, steps_by_external_id, step_dict, **kw
self.__set_default_label(step, module, step_dict.get('tool_state'))
module.save_to_step(step)
- annotation = step_dict['annotation']
+ annotation = step_dict.get('annotation')
if annotation:
annotation = sanitize_html(annotation)
self.add_item_annotation(trans.sa_session, trans.get_user(), step, annotation)
# Stick this in the step temporarily
- step.temp_input_connections = step_dict['input_connections']
+ step.temp_input_connections = step_dict.get('input_connections', {})
# Create the model class for the step
steps.append(step)
@@ -1390,6 +1406,28 @@ def __set_default_label(self, step, module, state):
if default_label and util.unicodify(default_label).lower() not in ['input dataset', 'input dataset collection']:
step.label = module.label = default_label
+ def refactor(self, trans, stored_workflow, refactor_request):
+ """Apply supplied actions to stored_workflow.latest_workflow to build a new version.
+ """
+ workflow = stored_workflow.latest_workflow
+ as_dict = self.workflow_to_dict(trans, stored_workflow, style="ga")
+ raw_workflow_description = self.normalize_workflow_format(trans, as_dict)
+ module_injector = WorkflowModuleInjector(trans)
+ WorkflowRefactorExecutor(raw_workflow_description, workflow, module_injector).refactor(refactor_request)
+ if refactor_request.dry_run:
+ # TODO: go a bit further with dry run, try to re-populate a workflow just
+ # don't flush it or tie it to the stored_workflow. Still, for now there is
+ # a lot that would be caught with just what is done here.
+ return None, []
+ else:
+ return self.update_workflow_from_raw_description(
+ trans,
+ stored_workflow,
+ raw_workflow_description,
+ fill_defaults=True,
+ update_stored_workflow_attributes=True,
+ )
+
class MissingToolsException(exceptions.MessageException):
diff --git a/lib/galaxy/model/__init__.py b/lib/galaxy/model/__init__.py
index 2f364a959951..b48023a22f9e 100644
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -4905,6 +4905,12 @@ def step_by_index(self, order_index):
return step
raise KeyError("Workflow has no step with order_index '%s'" % order_index)
+ def step_by_label(self, label):
+ for step in self.steps:
+ if label == step.label:
+ return step
+ raise KeyError("Workflow has no step with label '%s'" % label)
+
@property
def input_steps(self):
for step in self.steps:
@@ -4994,6 +5000,7 @@ class WorkflowStep(RepresentById):
"data_collection_input": "dataset_collection",
"parameter_input": "parameter",
}
+ DEFAULT_POSITION = {"left": 0, "top": 0}
def __init__(self):
self.id = None
diff --git a/lib/galaxy/webapps/galaxy/api/workflows.py b/lib/galaxy/webapps/galaxy/api/workflows.py
index d98231e2a8f7..662858883c04 100644
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -45,6 +45,7 @@
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
+from galaxy.workflow.refactor.schema import RefactorRequest
from galaxy.workflow.run import invoke, queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
@@ -633,6 +634,31 @@ def update(self, trans, id, payload, **kwds):
raise exceptions.RequestParameterInvalidException(message)
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
+ @expose_api
+ def refactor(self, trans, id, payload, **kwds):
+ """
+ * PUT /api/workflows/{id}/refactor
+ applies a set of refactoring actions to the workflow stored with ``id``
+
+ :type id: str
+ :param id: the encoded id of the workflow to update
+ :param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
+ by default.
+ :type instance: boolean
+ :type payload: dict
+ :param payload: a dictionary containing list of actions to apply.
+ :rtype: dict
+ :returns: serialized version of the workflow
+ """
+ stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
+ refactor_request = RefactorRequest(**payload)
+ style = payload.get("style", "export")
+ result, errors = self.workflow_contents_manager.refactor(
+ trans, stored_workflow, refactor_request
+ )
+ # TODO: handle errors...
+ return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style)
+
@expose_api
def build_module(self, trans, payload=None):
"""
diff --git a/lib/galaxy/webapps/galaxy/buildapp.py b/lib/galaxy/webapps/galaxy/buildapp.py
index 011d19c70a0a..f94878736e61 100644
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -445,6 +445,7 @@ def populate_api_routes(webapp, app):
webapp.mapper.connect('/api/workflows/build_module', action='build_module', controller="workflows")
webapp.mapper.connect('/api/workflows/menu', action='get_workflow_menu', controller="workflows", conditions=dict(method=["GET"]))
webapp.mapper.connect('/api/workflows/menu', action='set_workflow_menu', controller="workflows", conditions=dict(method=["PUT"]))
+ webapp.mapper.connect('/api/workflows/{id}/refactor', action='refactor', controller="workflows", conditions=dict(method=["PUT"]))
webapp.mapper.resource('workflow', 'workflows', path_prefix='/api')
webapp.mapper.connect('/api/licenses', controller='licenses', action='index')
webapp.mapper.connect('/api/licenses/{id}', controller='licenses', action='get')
diff --git a/lib/galaxy/workflow/modules.py b/lib/galaxy/workflow/modules.py
index 0fff90933739..bc3db659b756 100644
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -776,6 +776,7 @@ def _parse_state_into_dict(self):
class InputParameterModule(WorkflowModule):
+ POSSIBLE_PARAMETER_TYPES = ["text", "integer", "float", "boolean", "color"]
type = "parameter_input"
name = "Input parameter"
default_parameter_type = "text"
diff --git a/lib/galaxy/workflow/refactor/__init__.py b/lib/galaxy/workflow/refactor/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/lib/galaxy/workflow/refactor/execute.py b/lib/galaxy/workflow/refactor/execute.py
new file mode 100644
index 000000000000..d40172cdc75e
--- /dev/null
+++ b/lib/galaxy/workflow/refactor/execute.py
@@ -0,0 +1,392 @@
+import logging
+
+from galaxy.exceptions import (
+ RequestParameterInvalidException,
+)
+from galaxy.tools.parameters import (
+ visit_input_values,
+)
+from galaxy.tools.parameters.basic import (
+ ConnectedValue,
+ contains_workflow_parameter,
+ runtime_to_json,
+)
+from .schema import (
+ AddInputAction,
+ AddStepAction,
+ ConnectAction,
+ DisconnectAction,
+ ExtractInputAction,
+ ExtractLegacyParameter,
+ InputReferenceByOrderIndex,
+ OutputReferenceByOrderIndex,
+ RefactorRequest,
+ RemoveUnlabeledWorkflowOutputs,
+ step_reference_union,
+ StepReferenceByLabel,
+ UpdateAnnotationAction,
+ UpdateCreatorAction,
+ UpdateLicenseAction,
+ UpdateNameAction,
+ UpdateReportAction,
+ UpdateStepLabelAction,
+ UpdateStepPositionAction,
+)
+from ..modules import (
+ InputParameterModule,
+ NO_REPLACEMENT,
+)
+
+log = logging.getLogger(__name__)
+
+
+class WorkflowRefactorExecutor:
+
+ def __init__(self, raw_workflow_description, workflow, module_injector):
+ # we mostly use the ga representation, but there may be cases where the
+ # models/modules of existing workflow are more usable.
+ self.raw_workflow_description = raw_workflow_description
+ self.workflow = workflow
+ self.module_injector = module_injector
+
+ def refactor(self, refactor_request: RefactorRequest):
+ for action in refactor_request.actions:
+ action_type = action.action_type
+ refactor_method_name = "_apply_%s" % action_type
+ refactor_method = getattr(self, refactor_method_name, None)
+ if refactor_method is None:
+ raise RequestParameterInvalidException(
+ f"Unknown workflow editing action encountered [{action_type}]"
+ )
+ refactor_method(action)
+
+ def _apply_update_step_label(self, action: UpdateStepLabelAction):
+ step = self._find_step_for_action(action)
+ step["label"] = action.label
+
+ def _apply_update_step_position(self, action: UpdateStepPositionAction):
+ step = self._find_step_for_action(action)
+ step["position"] = action.position.to_dict()
+
+ def _apply_update_name(self, action: UpdateNameAction):
+ self._as_dict["name"] = action.name
+
+ def _apply_update_annotation(self, action: UpdateAnnotationAction):
+ self._as_dict["annotation"] = action.annotation
+
+ def _apply_update_license(self, action: UpdateLicenseAction):
+ self._as_dict["license"] = action.license
+
+ def _apply_update_creator(self, action: UpdateCreatorAction):
+ self._as_dict["creator"] = action.creator
+
+ def _apply_update_report(self, action: UpdateReportAction):
+ self._as_dict["report"] = {"markdown": action.report.markdown}
+
+ def _apply_add_step(self, action: AddStepAction):
+ steps = self._as_dict["steps"]
+ order_index = len(steps)
+ step_dict = {
+ "order_index": order_index,
+ "id": "new_%d" % order_index,
+ "type": action.type,
+ }
+ if action.tool_state:
+ step_dict["tool_state"] = action.tool_state
+ if action.label:
+ step_dict["label"] = action.label
+ if action.position:
+ step_dict["position"] = action.position.to_dict()
+ steps[order_index] = step_dict
+
+ def _apply_add_input(self, action: AddInputAction):
+ input_type = action.type
+ module_type = None
+
+ tool_state = {}
+ if input_type in ["data", "dataset"]:
+ module_type = "data_input"
+ elif input_type in ["data_collection", "dataset_collection"]:
+ module_type == "data_collection_input"
+ tool_state["collection_type"] = action.collection_type
+ else:
+ if input_type not in InputParameterModule.POSSIBLE_PARAMETER_TYPES:
+ raise RequestParameterInvalidException(f"Invalid input type {input_type} encountered")
+ module_type = "parameter_input"
+ tool_state["parameter_type"] = input_type
+
+ for action_key in ["restrictions", "suggestions", "optional", "default"]:
+ value = getattr(action, action_key, None)
+ if value is not None:
+ tool_state[action_key] = value
+
+ if action.restrict_on_connections is not None:
+ tool_state["restrictOnConnections"] = action.restrict_on_connections
+
+ add_step_kwds = {}
+ if action.label:
+ add_step_kwds["label"] = action.label
+
+ add_step_action = AddStepAction(
+ action_type="add_step",
+ type=module_type,
+ tool_state=tool_state,
+ position=action.position,
+ **add_step_kwds
+ )
+ self._apply_add_step(add_step_action)
+
+ def _apply_disconnect(self, action: DisconnectAction):
+ input_step_dict, input_name, output_step_dict, output_name = self._connection(action)
+ output_order_index = output_step_dict["id"] # wish this was order_index...
+ # default name is name used for input's output terminal - following
+ # format2 convention of allowing this to be absent for clean references
+ # to workflow inputs.
+ all_input_connections = input_step_dict.get("input_connections")
+ self.normalize_input_connections_to_list(all_input_connections, input_name)
+ input_connections = all_input_connections[input_name]
+
+ # multiple outputs may be attached to this input, just detach
+ # the specific one requested.
+ delete_index = None
+ for connection_index, output in enumerate(input_connections):
+ if output["id"] == output_order_index and output["output_name"] == output_name:
+ delete_index = connection_index
+ break
+ if delete_index is None:
+ raise RequestParameterInvalidException("Failed to locate connection to disconnect")
+ del input_connections[delete_index]
+
+ def _apply_connect(self, action: ConnectAction):
+ input_step_dict, input_name, output_step_dict, output_name = self._connection(action)
+ output_order_index = output_step_dict["id"] # wish this was order_index...
+ all_input_connections = input_step_dict.get("input_connections")
+ self.normalize_input_connections_to_list(all_input_connections, input_name, add_if_missing=True)
+ input_connections = all_input_connections[input_name]
+ input_connections.append({
+ 'id': output_order_index,
+ 'output_name': output_name,
+ })
+
+ def _apply_extract_input(self, action: ExtractInputAction):
+ input_step_dict, input_name = self._input_from_action(action)
+ step = self._step_with_module(input_step_dict["id"])
+ module = step.module
+ inputs = module.get_all_inputs()
+
+ input_def = None
+ found_input_names = []
+ for input in inputs:
+ found_input_name = input["name"]
+ found_input_names.append(found_input_name)
+ if found_input_name == input_name:
+ input_def = input
+ break
+ if input_def is None:
+ raise RequestParameterInvalidException(f"Failed to find input with name {input_name} on step {input_step_dict['id']} - input names found {found_input_names}")
+ if input_def.get("multiple", False):
+ raise RequestParameterInvalidException("Cannot extract input for multi-input inputs")
+
+ module_input_type = input_def.get("input_type")
+ # convert dataset, dataset_collection => data, data_collection for refactor API
+ input_type = {
+ "dataset": "data",
+ "dataset_collection": "data_collection",
+ }.get(module_input_type, module_input_type)
+
+ input_action = AddInputAction(
+ action_type="add_input",
+ optional=input_def.get("optional"),
+ type=input_type,
+ label=action.label,
+ position=action.position,
+ )
+ new_input_order_index = self._add_input_get_order_index(input_action)
+ connect_action = ConnectAction(
+ action_type="connect",
+ input=action.input,
+ output=OutputReferenceByOrderIndex(order_index=new_input_order_index),
+ )
+ self._apply_connect(connect_action)
+
+ def _apply_extract_legacy_parameter(self, action: ExtractLegacyParameter):
+ legacy_parameter_name = action.name
+ new_label = action.label or legacy_parameter_name
+ target_value = "${%s}" % legacy_parameter_name
+
+ target_tool_inputs = []
+ rename_pjas = []
+
+ for step_def, step in self._iterate_over_step_pairs():
+ module = step.module
+ if module.type != "tool":
+ continue
+
+ tool = module.tool
+ tool_inputs = module.state
+
+ replace_tool_state = False
+
+ def callback(input, prefixed_name, context, value=None, **kwargs):
+ nonlocal replace_tool_state
+ # data parameters cannot have legacy parameter values
+ if input.type in ['data', 'data_collection']:
+ return NO_REPLACEMENT
+
+ if not contains_workflow_parameter(value):
+ return NO_REPLACEMENT
+
+ if value == target_value:
+ target_tool_inputs.append((step.order_index, input, prefixed_name))
+ replace_tool_state = True
+ return runtime_to_json(ConnectedValue())
+ else:
+ return NO_REPLACEMENT
+ visit_input_values(tool.inputs, tool_inputs.inputs, callback, no_replacement_value=NO_REPLACEMENT)
+ if replace_tool_state:
+ step_def["tool_state"] = step.module.get_tool_state()
+
+ for post_job_action in self._iterate_over_rename_pjas():
+ newname = post_job_action.get("action_arguments", {}).get("newname")
+ if target_value in newname:
+ rename_pjas.append(post_job_action)
+
+ if len(target_tool_inputs) == 0 and len(rename_pjas) == 0:
+ raise RequestParameterInvalidException(f"Failed to find {target_value} in the tool state or post-job actions of any workflow step.")
+
+ as_parameter_type = {
+ "text": "text",
+ "integer": "integer",
+ "float": "float",
+ "select": "text",
+ "genomebuild": "text",
+ }
+ target_parameter_types = set()
+ for _, tool_input, _ in target_tool_inputs:
+ tool_input_type = tool_input.type
+ if tool_input_type not in as_parameter_type:
+ raise RequestParameterInvalidException(f"Extracting inputs for parameters on tool inputs of type {tool_input_type} is unsupported")
+ target_parameter_type = as_parameter_type[tool_input_type]
+ target_parameter_types.add(target_parameter_type)
+
+ if len(target_parameter_types) > 1:
+ raise RequestParameterInvalidException("Extracting inputs for parameters used across conflicting tool input types (e.g. numeric and non-numeric) is unsupported")
+
+ if len(target_parameter_types) == 1:
+ (target_parameter_type,) = target_parameter_types
+ else:
+ # only used in PJA, hence only used as a string
+ target_parameter_type = "text"
+
+ for rename_pja in rename_pjas:
+ # if name != label, we have to rewrite this rename PJA with the new label.
+ if legacy_parameter_name != new_label:
+ action_arguments = rename_pja.get("action_arguments")
+ old_newname = action_arguments["newname"]
+ new_newname = old_newname.replace(target_value, "${%s}" % new_label)
+ action_arguments["newname"] = new_newname
+
+ optional = False
+ input_action = AddInputAction(
+ action_type="add_input",
+ optional=optional,
+ type=target_parameter_type,
+ label=new_label,
+ position=action.position,
+ )
+ new_input_order_index = self._add_input_get_order_index(input_action)
+
+ for order_index, tool_input, prefixed_name in target_tool_inputs:
+ connect_input = InputReferenceByOrderIndex(order_index=order_index, input_name=prefixed_name)
+ connect_action = ConnectAction(
+ action_type="connect",
+ input=connect_input,
+ output=OutputReferenceByOrderIndex(order_index=new_input_order_index),
+ )
+ self._apply_connect(connect_action)
+
+ def _apply_remove_unlabeled_workflow_outputs(self, action: RemoveUnlabeledWorkflowOutputs):
+ for step in self._as_dict["steps"].values():
+ new_outputs = []
+ for workflow_output in step.get("workflow_outputs", []):
+ if workflow_output.get("label") is None:
+ continue
+ new_outputs.append(workflow_output)
+ step["workflow_outputs"] = new_outputs
+
+ def _find_step(self, step_reference: step_reference_union):
+ order_index = None
+ if isinstance(step_reference, StepReferenceByLabel):
+ label = step_reference.label
+ if not label:
+ raise RequestParameterInvalidException("Empty label provided.")
+ for step_order_index, step in self._as_dict["steps"].items():
+ if step["label"] == label:
+ order_index = step_order_index
+ break
+ else:
+ order_index = step_reference.order_index
+ if order_index is None:
+ raise RequestParameterInvalidException(f"Failed to resolve step_reference {step_reference}")
+ if len(self._as_dict["steps"]) <= order_index:
+ raise RequestParameterInvalidException(f"Failed to resolve step_reference {step_reference}")
+ return self._as_dict["steps"][order_index]
+
+ def _find_step_for_action(self, action):
+ step_reference = action.step
+ return self._find_step(step_reference)
+
+ def _step_with_module(self, order_index):
+ step = self.workflow.steps[order_index]
+ if not hasattr(step, "module"):
+ self.module_injector.inject(step)
+ return step
+
+ def _iterate_over_step_pairs(self):
+ # walk over both the dict-ified steps and the model steps (ensuring
+ # the module is attached).
+ for order_index, step_def in self._as_dict["steps"].items():
+ if order_index >= len(self.workflow.steps):
+ # newly added step during refactoring, don't iterate over it...
+ continue
+ else:
+ step = self._step_with_module(order_index)
+ yield step_def, step
+
+ def _iterate_over_rename_pjas(self):
+ for _, step_def in self._as_dict["steps"].items():
+ if step_def["type"] != "tool":
+ continue
+ post_job_actions = step_def.get("post_job_actions", {})
+ for post_job_action in post_job_actions.values():
+ if post_job_action["action_type"] == "RenameDatasetAction":
+ yield post_job_action
+
+ def _add_input_get_order_index(self, input_action: AddInputAction):
+ self._apply_add_input(input_action)
+ return len(self._as_dict["steps"]) - 1
+
+ def _input_from_action(self, action):
+ input_reference = action.input
+ input_step_dict = self._find_step(input_reference)
+ input_name = input_reference.input_name
+ return input_step_dict, input_name
+
+ def _connection(self, action):
+ input_step_dict, input_name = self._input_from_action(action)
+ output_reference = action.output
+ output_step_dict = self._find_step(output_reference)
+ output_name = output_reference.output_name
+ return input_step_dict, input_name, output_step_dict, output_name
+
+ @staticmethod
+ def normalize_input_connections_to_list(all_input_connections, input_name, add_if_missing=False):
+ if add_if_missing and input_name not in all_input_connections:
+ all_input_connections[input_name] = []
+ input_connections = all_input_connections[input_name]
+ if not isinstance(input_connections, list):
+ all_input_connections[input_name] = [input_connections]
+
+ @property
+ def _as_dict(self):
+ return self.raw_workflow_description.as_dict
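`WorkflowRefactorExecutor.refactor()` resolves each action to a handler purely by naming convention: an `action_type` of `foo` dispatches to `_apply_foo`, so the supported action types are exactly the `_apply_*` methods defined above. A stripped-down illustration of that lookup (the class and action here are illustrative only, not Galaxy code):

```python
# Toy sketch of the name-based dispatch used in refactor() above.
class ExecutorSketch:
    def _apply_update_name(self, action):
        print("would rename workflow to", action["name"])

    def refactor(self, actions):
        for action in actions:
            # action_type "update_name" -> method "_apply_update_name"
            handler = getattr(self, "_apply_%s" % action["action_type"], None)
            if handler is None:
                raise ValueError("Unknown workflow editing action encountered [%s]" % action["action_type"])
            handler(action)


ExecutorSketch().refactor([{"action_type": "update_name", "name": "my cool new name"}])
```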
diff --git a/lib/galaxy/workflow/refactor/schema.py b/lib/galaxy/workflow/refactor/schema.py
new file mode 100644
index 000000000000..e4aa34da5a5b
--- /dev/null
+++ b/lib/galaxy/workflow/refactor/schema.py
@@ -0,0 +1,194 @@
+from typing import Any, Dict, List, Optional, Union
+
+from pydantic import BaseModel
+from typing_extensions import Literal
+
+
+class StepReferenceByOrderIndex(BaseModel):
+ order_index: int
+
+
+class StepReferenceByLabel(BaseModel):
+ label: str
+
+
+step_reference_union = Union[StepReferenceByOrderIndex, StepReferenceByLabel]
+
+
+class InputReferenceByOrderIndex(StepReferenceByOrderIndex):
+ input_name: str
+
+
+class InputReferenceByLabel(StepReferenceByLabel):
+ input_name: str
+
+
+input_reference_union = Union[InputReferenceByOrderIndex, InputReferenceByLabel]
+
+
+class OutputReferenceByOrderIndex(StepReferenceByOrderIndex):
+ output_name: Optional[str] = "output"
+
+
+class OutputReferenceByLabel(StepReferenceByLabel):
+ output_name: Optional[str] = "output"
+
+
+output_reference_union = Union[OutputReferenceByOrderIndex, OutputReferenceByLabel]
+
+
+class Position(BaseModel):
+ left: float
+ top: float
+
+ def to_dict(self):
+ position = {
+ "left": self.left,
+ "top": self.top,
+ }
+ return position
+
+
+class BaseAction(BaseModel):
+ """Refactoring actions."""
+
+
+class Action:
+
+ @classmethod
+ def __get_validators__(cls):
+ yield cls.return_action
+
+ @classmethod
+ def return_action(cls, values):
+ try:
+ action_type = values["action_type"]
+ except KeyError:
+ raise ValueError(
+ f"Missing required 'action_type' field for refactoring action: {values}"
+ )
+ try:
+ return ACTION_CLASSES_BY_TYPE[action_type](**values)
+ except KeyError:
+ raise ValueError(f"Unknown action_type encountered: {action_type}")
+
+
+class UpdateStepLabelAction(BaseAction):
+ action_type: Literal['update_step_label']
+ label: str
+ step: step_reference_union
+
+
+class UpdateStepPositionAction(BaseAction):
+ action_type: Literal['update_step_position']
+ step: step_reference_union
+ position: Position
+
+
+class AddStepAction(BaseAction):
+ action_type: Literal['add_step']
+ type: str # module.type
+ tool_state: Optional[Dict[str, Any]]
+ label: Optional[str]
+ position: Optional[Position]
+
+
+class ConnectAction(BaseAction):
+ action_type: Literal['connect']
+ input: input_reference_union
+ output: output_reference_union
+
+
+class DisconnectAction(BaseAction):
+ action_type: Literal['disconnect']
+ input: input_reference_union
+ output: output_reference_union
+
+
+class AddInputAction(BaseAction):
+ action_type: Literal['add_input']
+ type: str
+ label: Optional[str]
+ position: Optional[Position]
+ collection_type: Optional[str]
+ restrictions: Optional[List[str]]
+ restrict_on_connections: Optional[bool]
+ suggestions: Optional[List[str]]
+ optional: Optional[bool] = False
+ default: Optional[Any] # this probably needs to be revisited when we have more complex field types
+
+
+class ExtractInputAction(BaseAction):
+ action_type: Literal['extract_input']
+ input: input_reference_union
+ label: Optional[str]
+ position: Optional[Position]
+
+
+class ExtractLegacyParameter(BaseAction):
+ action_type: Literal['extract_legacy_parameter']
+ name: str
+ label: Optional[str] # defaults to name if unset
+ position: Optional[Position]
+
+
+class RemoveUnlabeledWorkflowOutputs(BaseAction):
+ action_type: Literal['remove_unlabeled_workflow_outputs']
+
+
+class UpdateNameAction(BaseAction):
+ action_type: Literal['update_name']
+ name: str
+
+
+class UpdateAnnotationAction(BaseAction):
+ action_type: Literal['update_annotation']
+ annotation: str
+
+
+class UpdateLicenseAction(BaseAction):
+ action_type: Literal['update_license']
+ license: str
+
+
+class UpdateCreatorAction(BaseAction):
+ action_type: Literal['update_creator']
+ creator: Any
+
+
+class Report(BaseModel):
+ markdown: str
+
+
+class UpdateReportAction(BaseAction):
+ action_type: Literal['update_report']
+ report: Report
+
+
+union_action_classes = Union[
+ AddInputAction,
+ AddStepAction,
+ ConnectAction,
+ DisconnectAction,
+ ExtractInputAction,
+ ExtractLegacyParameter,
+ UpdateAnnotationAction,
+ UpdateCreatorAction,
+ UpdateNameAction,
+ UpdateLicenseAction,
+ UpdateReportAction,
+ UpdateStepLabelAction,
+ UpdateStepPositionAction,
+ RemoveUnlabeledWorkflowOutputs,
+]
+
+
+ACTION_CLASSES_BY_TYPE = {}
+for action_class in union_action_classes.__args__:
+ action_type = action_class.schema()["properties"]["action_type"]["const"]
+ ACTION_CLASSES_BY_TYPE[action_type] = action_class
+
+
+class RefactorRequest(BaseModel):
+ actions: List[Action]
+ dry_run: bool = False
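Because `Action.__get_validators__` routes each incoming dict through `ACTION_CLASSES_BY_TYPE`, constructing a `RefactorRequest` from plain dicts yields fully typed action objects, as the unit test at the end of this diff exercises. A small usage sketch:

```python
# Sketch: RefactorRequest coerces plain dicts into the concrete action
# classes via the action_type discriminator handled by Action.return_action.
from galaxy.workflow.refactor.schema import RefactorRequest, UpdateStepLabelAction

request = RefactorRequest(actions=[
    {"action_type": "update_step_label", "step": {"order_index": 0}, "label": "new_label"},
])
action = request.actions[0]
assert isinstance(action, UpdateStepLabelAction)
assert action.step.order_index == 0
assert action.label == "new_label"
```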
diff --git a/lib/galaxy/workflow/run_request.py b/lib/galaxy/workflow/run_request.py
index a961a6f022b5..c08167b5ce42 100644
--- a/lib/galaxy/workflow/run_request.py
+++ b/lib/galaxy/workflow/run_request.py
@@ -482,7 +482,11 @@ def workflow_request_to_run_config(work_request_context, workflow_invocation):
for input_association in workflow_invocation.input_dataset_collections:
inputs[input_association.workflow_step_id] = input_association.dataset_collection
for input_association in workflow_invocation.input_step_parameters:
- inputs[input_association.workflow_step_id] = input_association.parameter_value
+ parameter_value = input_association.parameter_value
+ inputs[input_association.workflow_step_id] = parameter_value
+ step_label = input_association.workflow_step.label
+ if step_label and step_label not in replacement_dict:
+ replacement_dict[step_label] = str(parameter_value)
if copy_inputs_to_history is None:
raise exceptions.InconsistentDatabase("Failed to find copy_inputs_to_history parameter loading workflow_invocation from database.")
workflow_run_config = WorkflowRunConfig(
diff --git a/lib/galaxy_test/api/test_workflows.py b/lib/galaxy_test/api/test_workflows.py
index bcc26d45128a..083a707f1d01 100644
--- a/lib/galaxy_test/api/test_workflows.py
+++ b/lib/galaxy_test/api/test_workflows.py
@@ -472,6 +472,35 @@ def test_update_tags(self):
update_response = self._update_workflow(workflow['id'], workflow).json()
assert update_response['tags'] == []
+ def test_refactor(self):
+ workflow_id = self.workflow_populator.upload_yaml_workflow("""
+class: GalaxyWorkflow
+inputs:
+ test_input: data
+steps:
+ first_cat:
+ tool_id: cat
+ in:
+ input1: test_input
+""")
+ actions = [
+ {"action_type": "update_step_label", "step": {"order_index": 0}, "label": "new_label"},
+ ]
+ refactor_response = self.workflow_populator.refactor_workflow(workflow_id, actions, dry_run=True)
+ refactor_response.raise_for_status()
+ # it was with dry_run=True - so the result is unchanged...
+ assert refactor_response.json()["steps"]["0"]["label"] == "test_input"
+
+ refactor_response = self.workflow_populator.refactor_workflow(workflow_id, actions, dry_run=True, style="editor")
+ refactor_response.raise_for_status()
+ # it was with dry_run=True - so the result is unchanged...
+ assert refactor_response.json()["steps"]["0"]["label"] == "test_input"
+
+ refactor_response = self.workflow_populator.refactor_workflow(workflow_id, actions)
+ refactor_response.raise_for_status()
+ # this time dry_run was default of False, so the label is indeed changed
+ assert refactor_response.json()["steps"]["0"]["label"] == "new_label"
+
def test_update_no_tool_id(self):
workflow_object = self.workflow_populator.load_workflow(name="test_import")
upload_response = self.__test_upload(workflow=workflow_object)
@@ -1244,6 +1273,54 @@ def test_run_subworkflow_replacement_parameters(self):
details = self.dataset_populator.get_history_dataset_details(history_id)
assert details["name"] == "moocow suffix"
+ @skip_without_tool("create_2")
+ def test_replacements_from_text_inputs(self):
+ with self.dataset_populator.test_history() as history_id:
+ run_def = """
+class: GalaxyWorkflow
+inputs: []
+steps:
+ create_2:
+ tool_id: create_2
+ state:
+ sleep_time: 0
+ outputs:
+ out_file1:
+ rename: "${replaceme} name"
+ out_file2:
+ rename: "${replaceme} name 2"
+test_data:
+ replacement_parameters:
+ replaceme: moocow
+"""
+
+ self._run_jobs(run_def, history_id=history_id)
+ details = self.dataset_populator.get_history_dataset_details(history_id)
+ assert details["name"] == "moocow name 2"
+
+ run_def = """
+class: GalaxyWorkflow
+inputs:
+ replaceme: text
+steps:
+ create_2:
+ tool_id: create_2
+ state:
+ sleep_time: 0
+ outputs:
+ out_file1:
+ rename: "${replaceme} name"
+ out_file2:
+ rename: "${replaceme} name 2"
+test_data:
+ replaceme:
+ value: moocow
+ type: raw
+"""
+ self._run_jobs(run_def, history_id=history_id)
+ details = self.dataset_populator.get_history_dataset_details(history_id)
+ assert details["name"] == "moocow name 2", details["name"]
+
@skip_without_tool("random_lines1")
def test_run_runtime_parameters_after_pause(self):
with self.dataset_populator.test_history() as history_id:
diff --git a/lib/galaxy_test/base/data/test_workflow_randomlines_legacy_params.ga b/lib/galaxy_test/base/data/test_workflow_randomlines_legacy_params.ga
new file mode 100644
index 000000000000..806668313f7d
--- /dev/null
+++ b/lib/galaxy_test/base/data/test_workflow_randomlines_legacy_params.ga
@@ -0,0 +1,55 @@
+{
+ "a_galaxy_workflow": "true",
+ "annotation": "",
+ "format-version": "0.1",
+ "name": "randomlines",
+ "steps": {
+ "0": {
+ "annotation": "",
+ "content_id": "random_lines1",
+ "errors": null,
+ "id": 0,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "runtime parameter for tool Select random lines",
+ "name": "input"
+ }
+ ],
+ "label": null,
+ "name": "Select random lines",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "bottom": 403,
+ "height": 112,
+ "left": 910.5,
+ "right": 1110.5,
+ "top": 291,
+ "width": 200,
+ "x": 910.5,
+ "y": 291
+ },
+ "post_job_actions": {},
+ "tool_id": "random_lines1",
+ "tool_state": "{\"input\": {\"__class__\": \"RuntimeValue\"}, \"num_lines\": \"${num}\", \"seed_source\": {\"seed_source_selector\": \"set_seed\", \"__current_case__\": 1, \"seed\": \"${seed}\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.0.2",
+ "type": "tool",
+ "uuid": "98e6ac0a-98bb-4e9e-9e1f-4013ceba2408",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "out_file1",
+ "uuid": "740450eb-cff1-44fc-b01d-05a133b16af7"
+ }
+ ]
+ }
+ },
+ "tags": [],
+ "uuid": "a3b70490-9ea0-4291-b4ed-48f90b7aef91",
+ "version": 2
+}
\ No newline at end of file
diff --git a/lib/galaxy_test/base/data/test_workflow_randomlines_legacy_params_mixed_types.ga b/lib/galaxy_test/base/data/test_workflow_randomlines_legacy_params_mixed_types.ga
new file mode 100644
index 000000000000..77ee878a0c69
--- /dev/null
+++ b/lib/galaxy_test/base/data/test_workflow_randomlines_legacy_params_mixed_types.ga
@@ -0,0 +1,55 @@
+{
+ "a_galaxy_workflow": "true",
+ "annotation": "",
+ "format-version": "0.1",
+ "name": "randomlines",
+ "steps": {
+ "0": {
+ "annotation": "",
+ "content_id": "random_lines1",
+ "errors": null,
+ "id": 0,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "runtime parameter for tool Select random lines",
+ "name": "input"
+ }
+ ],
+ "label": null,
+ "name": "Select random lines",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "bottom": 403,
+ "height": 112,
+ "left": 910.5,
+ "right": 1110.5,
+ "top": 291,
+ "width": 200,
+ "x": 910.5,
+ "y": 291
+ },
+ "post_job_actions": {},
+ "tool_id": "random_lines1",
+ "tool_state": "{\"input\": {\"__class__\": \"RuntimeValue\"}, \"num_lines\": \"${mixed_param}\", \"seed_source\": {\"seed_source_selector\": \"set_seed\", \"__current_case__\": 1, \"seed\": \"${mixed_param}\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+ "tool_version": "2.0.2",
+ "type": "tool",
+ "uuid": "98e6ac0a-98bb-4e9e-9e1f-4013ceba2408",
+ "workflow_outputs": [
+ {
+ "label": null,
+ "output_name": "out_file1",
+ "uuid": "740450eb-cff1-44fc-b01d-05a133b16af7"
+ }
+ ]
+ }
+ },
+ "tags": [],
+ "uuid": "a3b70490-9ea0-4291-b4ed-48f90b7aef91",
+ "version": 2
+}
\ No newline at end of file
diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py
index c218968209b8..30d7b54dbc30 100644
--- a/lib/galaxy_test/base/populators.py
+++ b/lib/galaxy_test/base/populators.py
@@ -927,6 +927,18 @@ def update_workflow(self, workflow_id, workflow_object):
put_response = self.galaxy_interactor._put(raw_url, data=json.dumps(data))
return put_response
+ def refactor_workflow(self, workflow_id, actions, dry_run=None, style=None):
+ data = dict(
+ actions=actions,
+ )
+ if style is not None:
+ data["style"] = style
+ if dry_run is not None:
+ data["dry_run"] = dry_run
+ raw_url = 'workflows/%s/refactor' % workflow_id
+ put_response = self.galaxy_interactor._put(raw_url, data=json.dumps(data))
+ return put_response
+
@contextlib.contextmanager
def export_for_update(self, workflow_id):
workflow_object = self.download_workflow(workflow_id)
diff --git a/test/integration/test_workflow_refactoring.py b/test/integration/test_workflow_refactoring.py
new file mode 100644
index 000000000000..2b3b306e48ea
--- /dev/null
+++ b/test/integration/test_workflow_refactoring.py
@@ -0,0 +1,273 @@
+from galaxy.managers.context import ProvidesAppContext
+from galaxy.workflow.refactor.schema import RefactorRequest
+from galaxy_test.base.populators import (
+ WorkflowPopulator,
+)
+from galaxy_test.driver import integration_util
+
+
+class WorkflowRefactoringIntegrationTestCase(integration_util.IntegrationTestCase):
+
+ framework_tool_and_types = True
+
+ def setUp(self):
+ super().setUp()
+ self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
+
+ def test_basic_refactoring_types(self):
+ self.workflow_populator.upload_yaml_workflow("""
+class: GalaxyWorkflow
+inputs:
+ test_input: data
+steps:
+ first_cat:
+ tool_id: cat
+ in:
+ input1: test_input
+""")
+
+ actions = [
+ {"action_type": "update_name", "name": "my cool new name"},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.stored_workflow.name == "my cool new name"
+
+ actions = [
+ {"action_type": "update_annotation", "annotation": "my cool new annotation"},
+ ]
+ self._refactor_without_errors(actions)
+ # TODO: test annotation change...
+
+ actions = [
+ {"action_type": "update_license", "license": "AFL-3.0"},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.license == "AFL-3.0"
+
+ actions = [
+ {"action_type": "update_creator", "creator": [{"class": "Person", "name": "Mary"}]},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.creator_metadata[0]["class"] == "Person"
+ assert self._latest_workflow.creator_metadata[0]["name"] == "Mary"
+
+ actions = [
+ {"action_type": "update_report", "report": {"markdown": "my report..."}}
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.reports_config["markdown"] == "my report..."
+
+ assert self._latest_workflow.step_by_index(0).label == "test_input"
+ actions = [
+ {"action_type": "update_step_label", "step": {"order_index": 0}, "label": "new_label"},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_index(0).label == "new_label"
+
+ actions = [
+ {"action_type": "update_step_position", "step": {"order_index": 0}, "position": {"left": 3, "top": 5}},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_index(0).label == "new_label"
+ assert self._latest_workflow.step_by_index(0).position["left"] == 3
+ assert self._latest_workflow.step_by_index(0).position["top"] == 5
+
+ # Build raw steps...
+ actions = [
+ {"action_type": "add_step", "type": "parameter_input", "label": "new_param", "tool_state": {"parameter_type": "text"}, "position": {"left": 10, "top": 50}},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_label("new_param").label == "new_param"
+ assert self._latest_workflow.step_by_label("new_param").tool_inputs.get("optional", False) is False
+ assert self._latest_workflow.step_by_label("new_param").position["left"] == 10
+ assert self._latest_workflow.step_by_label("new_param").position["top"] == 50
+
+ # Cleaner syntax for defining inputs...
+ actions = [
+ {"action_type": "add_input", "type": "text", "label": "new_param2", "optional": True, "position": {"top": 1, "left": 2}},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_label("new_param2").label == "new_param2"
+ assert self._latest_workflow.step_by_label("new_param2").tool_inputs.get("optional", False) is True
+ assert self._latest_workflow.step_by_label("new_param2").position["top"] == 1
+ assert self._latest_workflow.step_by_label("new_param2").position["left"] == 2
+
+ assert len(self._latest_workflow.step_by_label("first_cat").inputs) == 1
+ actions = [
+ {
+ "action_type": "disconnect",
+ "input": {"label": "first_cat", "input_name": "input1"},
+ "output": {"label": "new_label"},
+ }
+ ]
+ self._refactor_without_errors(actions)
+ assert len(self._latest_workflow.step_by_label("first_cat").inputs) == 0
+
+ actions = [
+ {
+ "action_type": "connect",
+ "input": {"label": "first_cat", "input_name": "input1"},
+ "output": {"label": "new_label"},
+ }
+ ]
+ self._refactor_without_errors(actions)
+ assert len(self._latest_workflow.step_by_label("first_cat").inputs) == 1
+
+ # Re-disconnect so we can test extract_input
+ actions = [
+ {
+ "action_type": "disconnect",
+ "input": {"label": "first_cat", "input_name": "input1"},
+ "output": {"label": "new_label"},
+ }
+ ]
+ self._refactor_without_errors(actions)
+
+ # try to create an input for first_cat/input1 automatically
+ actions = [
+ {
+ "action_type": "extract_input",
+ "input": {"label": "first_cat", "input_name": "input1"},
+ "label": "extracted_input",
+ }
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_label("extracted_input")
+ assert len(self._latest_workflow.step_by_label("first_cat").inputs) == 1
+
+ def test_refactoring_legacy_parameters(self):
+ wf = self.workflow_populator.load_workflow_from_resource("test_workflow_randomlines_legacy_params")
+ self.workflow_populator.create_workflow(wf)
+ actions = [
+ {"action_type": "extract_legacy_parameter", "name": "seed"},
+ {"action_type": "extract_legacy_parameter", "name": "num", "label": "renamed_num"},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_label("seed").tool_inputs["parameter_type"] == "text"
+ assert self._latest_workflow.step_by_label("renamed_num").tool_inputs["parameter_type"] == "integer"
+ random_lines_state = self._latest_workflow.step_by_index(2).tool_inputs
+ assert "num_lines" in random_lines_state
+ num_lines = random_lines_state["num_lines"]
+ assert isinstance(num_lines, dict)
+ assert "__class__" in num_lines
+ assert num_lines["__class__"] == 'ConnectedValue'
+ assert "seed_source" in random_lines_state
+ seed_source = random_lines_state["seed_source"]
+ assert isinstance(seed_source, dict)
+ assert "seed" in seed_source
+ seed = seed_source["seed"]
+ assert isinstance(seed, dict)
+ assert "__class__" in seed
+ assert seed["__class__"] == 'ConnectedValue'
+
+ # cannot handle mixed, incompatible types on the inputs though
+ wf = self.workflow_populator.load_workflow_from_resource("test_workflow_randomlines_legacy_params_mixed_types")
+ self.workflow_populator.create_workflow(wf)
+ actions = [
+ {"action_type": "extract_legacy_parameter", "name": "mixed_param"},
+ ]
+ expected_exception = None
+ try:
+ self._refactor(actions)
+ except Exception as e:
+ expected_exception = e
+ assert expected_exception
+ assert "input types" in str(expected_exception)
+
+ def test_refactoring_legacy_parameters_without_tool_state(self):
+ # test parameters used in PJA without being used in tool state.
+ # These will work fine with the simplified workflow UI, but should probably
+ # be formalized and assigned a unique label and informative annotation.
+ self.workflow_populator.upload_yaml_workflow("""
+class: GalaxyWorkflow
+inputs:
+ test_input: data
+steps:
+ first_cat:
+ tool_id: cat
+ in:
+ input1: test_input
+ outputs:
+ out_file1:
+ rename: "${pja_only_param} name"
+""")
+ actions = [
+ {"action_type": "extract_legacy_parameter", "name": "pja_only_param"},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_label("pja_only_param").tool_inputs["parameter_type"] == "text"
+
+ def test_refactoring_legacy_parameters_without_tool_state_relabel(self):
+ # same thing as above, but apply relabeling and ensure PJA gets updated.
+ self.workflow_populator.upload_yaml_workflow("""
+class: GalaxyWorkflow
+inputs:
+ test_input: data
+steps:
+ first_cat:
+ tool_id: cat
+ in:
+ input1: test_input
+ outputs:
+ out_file1:
+ rename: "${pja_only_param} name"
+""")
+ actions = [
+ {"action_type": "extract_legacy_parameter", "name": "pja_only_param", "label": "new_label"},
+ ]
+ self._refactor_without_errors(actions)
+ assert self._latest_workflow.step_by_label("new_label").tool_inputs["parameter_type"] == "text"
+ pjas = self._latest_workflow.step_by_label("first_cat").post_job_actions
+ assert len(pjas) == 1
+ pja = pjas[0]
+ assert "newname" in pja.action_arguments
+ assert "${new_label}" in pja.action_arguments["newname"]
+
+ def test_removing_unlabeled_workflow_outputs(self):
+ wf = self.workflow_populator.load_workflow_from_resource("test_workflow_randomlines_legacy_params")
+ self.workflow_populator.create_workflow(wf)
+ only_step = self._latest_workflow.step_by_index(0)
+ assert len(only_step.workflow_outputs) == 1
+ actions = [
+ {"action_type": "remove_unlabeled_workflow_outputs"},
+ ]
+ self._refactor_without_errors(actions)
+ only_step = self._latest_workflow.step_by_index(0)
+ assert len(only_step.workflow_outputs) == 0
+
+ def _refactor_without_errors(self, actions):
+ updated, errors = self._refactor(actions)
+ assert updated
+ assert not errors
+ return updated
+
+ def _refactor(self, actions):
+ user = self._app.model.session.query(self._app.model.User).order_by(self._app.model.User.id.desc()).limit(1).one()
+ mock_trans = MockTrans(self._app, user)
+ return self._manager.refactor(
+ mock_trans,
+ self._most_recent_stored_workflow,
+ RefactorRequest(**{"actions": actions})
+ )
+
+ @property
+ def _manager(self):
+ return self._app.workflow_contents_manager
+
+ @property
+ def _most_recent_stored_workflow(self):
+ app = self._app
+ model = app.model
+ return app.model.session.query(app.model.StoredWorkflow).order_by(model.StoredWorkflow.id.desc()).limit(1).one()
+
+ @property
+ def _latest_workflow(self):
+ return self._most_recent_stored_workflow.latest_workflow
+
+
+class MockTrans(ProvidesAppContext):
+
+ def __init__(self, app, user):
+ self.app = app
+ self.user = user
+ self.history = None
diff --git a/test/unit/workflows/test_refactor_models.py b/test/unit/workflows/test_refactor_models.py
new file mode 100644
index 000000000000..9bb65aa2ab46
--- /dev/null
+++ b/test/unit/workflows/test_refactor_models.py
@@ -0,0 +1,59 @@
+from galaxy.workflow.refactor.schema import RefactorRequest
+
+
+def test_root_list():
+ request = {
+ "actions": [
+ {"action_type": "add_step", "label": "foobar", "type": "tool", "tool_state": {"a": 6}},
+ {"action_type": "update_step_label", "label": "new_label", "step": {"order_index": 5}},
+ {"action_type": "update_step_label", "label": "new_label", "step": {"label": "cool_label"}},
+ {"action_type": "connect", "input": {"input_name": "foobar", "order_index": 6}, "output": {"label": "cow"}},
+ {"action_type": "disconnect", "input": {"input_name": "foobar2", "label": "foolabel"}, "output": {"order_index": 7, "output_name": "o_name"}},
+ {"action_type": "add_input", "type": "data"},
+ {"action_type": "add_input", "type": "integer", "optional": True, "default": 5},
+ {"action_type": "extract_input", "input": {"order_index": 5, "input_name": "foobar"}},
+ {"action_type": "extract_legacy_parameter", "name": "foo"},
+ {"action_type": "extract_legacy_parameter", "name": "foo", "label": "new_foo"},
+ ],
+ }
+ ar = RefactorRequest(**request)
+ actions = ar.actions
+
+ a0 = actions[0]
+ assert a0.tool_state["a"] == 6
+ assert a0.label == "foobar"
+
+ a1 = actions[1]
+ assert a1.step.order_index == 5
+ a2 = actions[2]
+ assert a2.step.label == "cool_label"
+
+ a3 = actions[3]
+ # Verify it sets default output_name
+ assert a3.output.output_name == "output"
+ assert a3.input.input_name == "foobar"
+
+ a4 = actions[4]
+ assert a4.output.output_name == "o_name"
+ assert a4.input.input_name == "foobar2"
+ assert a4.input.label == "foolabel"
+
+ a5 = actions[5]
+ assert a5.type == "data"
+ assert a5.optional is False
+
+ a6 = actions[6]
+ assert a6.optional is True
+ assert a6.default == 5
+
+ a7 = actions[7]
+ assert a7.input.order_index == 5
+ assert a7.input.input_name == "foobar"
+
+ a8 = actions[8]
+ assert a8.name == "foo"
+ assert a8.label is None
+
+ a9 = actions[9]
+ assert a9.name == "foo"
+ assert a9.label == "new_foo"