+
+
diff --git a/client/galaxy/scripts/components/Sharing.vue b/client/galaxy/scripts/components/Sharing.vue
index 84ffbde9e3c8..c7f75a77e56f 100644
--- a/client/galaxy/scripts/components/Sharing.vue
+++ b/client/galaxy/scripts/components/Sharing.vue
@@ -116,7 +116,7 @@
The following users will see this {{ model_class_lc }} in their {{ model_class_lc }} list
and will be able to view, import and run it.
-
+
{
}
};
- // Run bef oreEach as async so the lifecycle methods can run
+ // Run beforeEach as async so the lifecycle methods can run
beforeEach(async () => {
// TODO: this mocking mechanism is no good.
diff --git a/client/galaxy/scripts/components/Tool.vue b/client/galaxy/scripts/components/Tool.vue
index 1ee4653c9bf4..8da1ae1ce5ec 100644
--- a/client/galaxy/scripts/components/Tool.vue
+++ b/client/galaxy/scripts/components/Tool.vue
@@ -9,9 +9,11 @@
:class="['tool-link', tool.id]"
>
-
+
{{ label }}
diff --git a/client/galaxy/scripts/components/Toolshed/Categories.vue b/client/galaxy/scripts/components/Toolshed/Categories.vue
index 27e034ccedd1..2439b20f4579 100644
--- a/client/galaxy/scripts/components/Toolshed/Categories.vue
+++ b/client/galaxy/scripts/components/Toolshed/Categories.vue
@@ -5,7 +5,7 @@
Loading categories...
-
+
diff --git a/client/galaxy/scripts/components/admin/Invocations.vue b/client/galaxy/scripts/components/admin/Invocations.vue
new file mode 100644
index 000000000000..2bdb85c71a52
--- /dev/null
+++ b/client/galaxy/scripts/components/admin/Invocations.vue
@@ -0,0 +1,110 @@
+
+
+
+ Workflow Invocations
+
+
+
+ Workflow invocations that are still being scheduled are displayed on this page.
+
+
+
+
+ Loading workflow invocation job data
+
+
+
+ There are currently no scheduling workflow invocations to show.
+
+
+
+ These invocations are not finished scheduling - one or more steps are waiting on other steps to
+ complete before the full structure of the jobs in the workflow can be determined.
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/client/galaxy/scripts/entry/admin/AdminRouter.js b/client/galaxy/scripts/entry/admin/AdminRouter.js
index ca0edba64fd0..f48519d78446 100644
--- a/client/galaxy/scripts/entry/admin/AdminRouter.js
+++ b/client/galaxy/scripts/entry/admin/AdminRouter.js
@@ -8,6 +8,7 @@ import Router from "layout/router";
import DataTables from "components/admin/DataTables.vue";
import DataTypes from "components/admin/DataTypes.vue";
import Jobs from "components/admin/Jobs.vue";
+import Invocations from "components/admin/Invocations.vue";
import DataManagerView from "components/admin/DataManager/DataManagerView.vue";
import DataManagerRouter from "components/admin/DataManager/DataManagerRouter.vue";
import Register from "components/login/Register.vue";
@@ -15,6 +16,7 @@ import ErrorStack from "components/admin/ErrorStack.vue";
import DisplayApplications from "components/admin/DisplayApplications.vue";
import Toolshed from "components/Toolshed/Index.vue";
import Vue from "vue";
+import store from "store";
export const getAdminRouter = (Galaxy, options) => {
const galaxyRoot = getAppRoot();
@@ -37,6 +39,7 @@ export const getAdminRouter = (Galaxy, options) => {
"(/)admin/data_tables": "show_data_tables",
"(/)admin/data_types": "show_data_types",
"(/)admin/jobs": "show_jobs",
+ "(/)admin/invocations": "show_invocations",
"(/)admin/data_manager*path": "show_data_manager",
"*notFound": "not_found"
},
@@ -112,7 +115,7 @@ export const getAdminRouter = (Galaxy, options) => {
const instance = Vue.extend(component);
const vm = document.createElement("div");
this.page.display(vm);
- new instance(props).$mount(vm);
+ new instance({ store, props }).$mount(vm);
},
show_data_tables: function() {
@@ -127,6 +130,10 @@ export const getAdminRouter = (Galaxy, options) => {
this._display_vue_helper(Jobs);
},
+ show_invocations: function() {
+ this._display_vue_helper(Invocations);
+ },
+
show_error_stack: function() {
this._display_vue_helper(ErrorStack);
},
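
The `_display_vue_helper` change above passes the shared Vuex store as an instantiation option, which is what makes `this.$store` available to the mounted component and its children. A minimal sketch of the pattern (`InvocationsView` and `mountPoint` are illustrative names; `Vue`, `Invocations`, and `store` are the imports shown above):

    const InvocationsView = Vue.extend(Invocations);
    const mountPoint = document.createElement("div");
    // Providing `store` at construction time injects $store into the whole subtree.
    new InvocationsView({ store }).$mount(mountPoint);
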
diff --git a/client/galaxy/scripts/entry/analysis/AnalysisRouter.js b/client/galaxy/scripts/entry/analysis/AnalysisRouter.js
index f9cfce3c78c8..c39f7ba9eb6d 100644
--- a/client/galaxy/scripts/entry/analysis/AnalysisRouter.js
+++ b/client/galaxy/scripts/entry/analysis/AnalysisRouter.js
@@ -24,8 +24,8 @@ import Tours from "mvc/tours";
import GridView from "mvc/grid/grid-view";
import EntryPointGridView from "mvc/entrypoints/view";
import GridShared from "mvc/grid/grid-shared";
-import Workflows from "mvc/workflow/workflow";
-import WorkflowImport from "components/WorkflowImport.vue";
+import WorkflowImport from "components/Workflow/WorkflowImport.vue";
+import WorkflowList from "components/Workflow/WorkflowList.vue";
import HistoryImport from "components/HistoryImport.vue";
import HistoryView from "components/HistoryView.vue";
import WorkflowInvocationReport from "components/WorkflowInvocationReport.vue";
@@ -307,7 +307,10 @@ export const getAnalysisRouter = Galaxy =>
},
show_workflows: function() {
- this.page.display(new Workflows.View());
+ const workflowListInstance = Vue.extend(WorkflowList);
+ const vm = document.createElement("div");
+ this.page.display(vm);
+ new workflowListInstance().$mount(vm);
},
show_workflows_create: function() {
diff --git a/client/galaxy/scripts/entry/panels/admin-panel.js b/client/galaxy/scripts/entry/panels/admin-panel.js
index f3070e35a96b..b77a2d96c680 100644
--- a/client/galaxy/scripts/entry/panels/admin-panel.js
+++ b/client/galaxy/scripts/entry/panels/admin-panel.js
@@ -36,11 +36,17 @@ const AdminPanel = Backbone.View.extend({
id: "admin-link-display-applications"
},
{
- title: _l("Manage jobs"),
+ title: _l("Jobs"),
url: "admin/jobs",
target: "__use_router__",
id: "admin-link-jobs"
},
+ {
+ title: _l("Workflow invocations"),
+ url: "admin/invocations",
+ target: "__use_router__",
+ id: "admin-link-invocations"
+ },
{
title: _l("Local data"),
url: "admin/data_manager",
diff --git a/client/galaxy/scripts/entry/panels/tool-panel.js b/client/galaxy/scripts/entry/panels/tool-panel.js
index 83f7854c9b01..a7ba8cf85aaa 100644
--- a/client/galaxy/scripts/entry/panels/tool-panel.js
+++ b/client/galaxy/scripts/entry/panels/tool-panel.js
@@ -128,21 +128,20 @@ const ToolPanel = Backbone.View.extend({
{
title: _l("All workflows"),
href: `${appRoot}workflows/list`,
- id: 'list',
+ id: "list"
},
...this.stored_workflow_menu_entries.map(menuEntry => {
return {
title: menuEntry["stored_workflow"]["name"],
href: `${appRoot}workflows/run?id=${menuEntry["encoded_stored_workflow_id"]}`,
- id: menuEntry["encoded_stored_workflow_id"],
+ id: menuEntry["encoded_stored_workflow_id"]
};
})
]
};
},
- render: function() {
- },
+ render: function() {},
/** build a link to one tool */
_templateTool: function(tool) {
diff --git a/client/galaxy/scripts/i18n.js b/client/galaxy/scripts/i18n.js
index 2983f1aad3f3..39133a94cb4e 100644
--- a/client/galaxy/scripts/i18n.js
+++ b/client/galaxy/scripts/i18n.js
@@ -3,7 +3,7 @@
* Released under MIT license, http://github.com/requirejs/i18n/LICENSE
*/
/*jslint regexp: true */
-/*global require: false, navigator: false, define: false */
+/*global define: false */
/**
* This plugin handles i18n! prefixed modules. It does the following:
@@ -73,7 +73,10 @@
function mixin(target, source, force) {
var prop;
for (prop in source) {
- if (source.hasOwnProperty(prop) && (!target.hasOwnProperty(prop) || force)) {
+ if (
+ Object.prototype.hasOwnProperty.call(source, prop) &&
+ (!Object.prototype.hasOwnProperty.call(target, prop) || force)
+ ) {
target[prop] = source[prop];
} else if (typeof source[prop] === "object") {
if (!target[prop] && source[prop]) {
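
This `Object.prototype.hasOwnProperty.call(...)` rewrite recurs throughout the diff; it satisfies ESLint's no-prototype-builtins rule and keeps working for objects that do not inherit from Object.prototype. A minimal illustration (`bare` is a hypothetical object, not from the codebase):

    const bare = Object.create(null); // no prototype, so no own .hasOwnProperty method
    bare.x = 1;
    // bare.hasOwnProperty("x") would throw: bare.hasOwnProperty is not a function
    Object.prototype.hasOwnProperty.call(bare, "x"); // true
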
diff --git a/client/galaxy/scripts/mvc/base-mvc.js b/client/galaxy/scripts/mvc/base-mvc.js
index 6d2fdca91091..102fbf18f418 100644
--- a/client/galaxy/scripts/mvc/base-mvc.js
+++ b/client/galaxy/scripts/mvc/base-mvc.js
@@ -141,7 +141,7 @@ var SessionStorageModel = Backbone.Model.extend({
/** T/F whether sessionStorage contains the model's id (data is present) */
isNew: function() {
- return !sessionStorage.hasOwnProperty(this.id);
+ return !Object.prototype.hasOwnProperty.call(sessionStorage, this.id);
},
_log: function() {
diff --git a/client/galaxy/scripts/mvc/collection/list-collection-creator.js b/client/galaxy/scripts/mvc/collection/list-collection-creator.js
index a613f5cd13d1..1de9d6eae87f 100644
--- a/client/galaxy/scripts/mvc/collection/list-collection-creator.js
+++ b/client/galaxy/scripts/mvc/collection/list-collection-creator.js
@@ -249,7 +249,7 @@ var ListCollectionCreator = Backbone.View.extend(BASE_MVC.LoggableMixin)
/** add ids to dataset objs in initial list if none */
_ensureElementIds: function() {
this.workingElements.forEach(element => {
- if (!element.hasOwnProperty("id")) {
+ if (!Object.prototype.hasOwnProperty.call(element, "id")) {
element.id = _.uniqueId();
}
});
@@ -296,7 +296,7 @@ var ListCollectionCreator = Backbone.View.extend(BASE_MVC.LoggableMixin)
var existingNames = {};
this.workingElements.forEach(element => {
var currName = element.name;
- while (existingNames.hasOwnProperty(currName)) {
+ while (Object.prototype.hasOwnProperty.call(existingNames, currName)) {
currName = `${element.name} (${counter})`;
counter += 1;
if (counter >= SAFETY) {
diff --git a/client/galaxy/scripts/mvc/collection/list-of-pairs-collection-creator.js b/client/galaxy/scripts/mvc/collection/list-of-pairs-collection-creator.js
index 41e7ade12de0..ea9e6746adbf 100644
--- a/client/galaxy/scripts/mvc/collection/list-of-pairs-collection-creator.js
+++ b/client/galaxy/scripts/mvc/collection/list-of-pairs-collection-creator.js
@@ -341,7 +341,7 @@ var PairedCollectionCreator = Backbone.View.extend(baseMVC.LoggableMixin)
/** add ids to dataset objs in initial list if none */
_ensureIds: function() {
this.initialList.forEach(dataset => {
- if (!dataset.hasOwnProperty("id")) {
+ if (!Object.prototype.hasOwnProperty.call(dataset, "id")) {
dataset.id = _.uniqueId();
}
});
diff --git a/client/galaxy/scripts/mvc/dataset/dataset-choice.js b/client/galaxy/scripts/mvc/dataset/dataset-choice.js
index cf5d709442d7..81aeded717a4 100644
--- a/client/galaxy/scripts/mvc/dataset/dataset-choice.js
+++ b/client/galaxy/scripts/mvc/dataset/dataset-choice.js
@@ -29,7 +29,7 @@ function _filterDatasetJSON(datasetJSON, where, datasetsOnly) {
//TODO: replace with _.matches (underscore 1.6.0)
function matches(obj, toMatch) {
for (var key in toMatch) {
- if (toMatch.hasOwnProperty(key)) {
+ if (Object.prototype.hasOwnProperty.call(toMatch, key)) {
if (obj[key] !== toMatch[key]) {
return false;
}
diff --git a/client/galaxy/scripts/mvc/entrypoints/poll.js b/client/galaxy/scripts/mvc/entrypoints/poll.js
index d23caee1eb2b..a141eaddcfe6 100644
--- a/client/galaxy/scripts/mvc/entrypoints/poll.js
+++ b/client/galaxy/scripts/mvc/entrypoints/poll.js
@@ -10,7 +10,6 @@ export const clearPolling = () => {
export const pollUntilActive = (onUpdate, onError, params) => {
clearPolling();
const url = getAppRoot() + `api/entry_points`;
- console.log(params);
axios
.get(url, { params: params })
.then(response => {
diff --git a/client/galaxy/scripts/mvc/history/hdca-li.js b/client/galaxy/scripts/mvc/history/hdca-li.js
index 5125126facd3..0035fad644c9 100644
--- a/client/galaxy/scripts/mvc/history/hdca-li.js
+++ b/client/galaxy/scripts/mvc/history/hdca-li.js
@@ -4,6 +4,7 @@ import DC_LI from "mvc/collection/collection-li";
import DC_VIEW from "mvc/collection/collection-view";
import _l from "utils/localization";
import { mountNametags } from "components/Nametags";
+import { mountCollectionJobStates } from "components/JobStates";
//==============================================================================
var _super = DC_LI.DCListItemView;
@@ -74,57 +75,12 @@ var HDCAListItemView = _super.extend(
state = this.model.get("populated_state") ? STATES.OK : STATES.RUNNING;
}
this.$el.addClass(`state-${state}`);
- var stateDescription = this.stateDescription();
- this.$(".state-description").html(stateDescription);
+ const collection = this.model;
+ const stateContainer = this.$el.find(".state-description")[0];
+ mountCollectionJobStates({ jobStatesSummary, collection }, stateContainer);
return this.$el;
},
- stateDescription: function() {
- var collection = this.model;
- var jobStateSource = collection.get("job_source_type");
- var collectionTypeDescription = DC_VIEW.collectionTypeDescription(collection);
- var simpleDescription = DC_VIEW.collectionDescription(collection);
- var jobStatesSummary = collection.jobStatesSummary;
- if (!jobStateSource || jobStateSource == "Job") {
- return simpleDescription;
- } else if (!jobStatesSummary || !jobStatesSummary.hasDetails()) {
- return `
-
- Loading job data for ${collectionTypeDescription}...
-
-
`;
- } else {
- var isNew = jobStatesSummary.new();
- var jobCount = isNew ? null : jobStatesSummary.jobCount();
- if (isNew) {
- return `
-
- Creating jobs...
-
-
`;
- } else if (jobStatesSummary.errored()) {
- var errorCount = jobStatesSummary.numInError();
- return `a ${collectionTypeDescription} with ${errorCount} / ${jobCount} jobs in error`;
- } else if (jobStatesSummary.terminal()) {
- return simpleDescription;
- } else {
- var running = jobStatesSummary.states()["running"] || 0;
- var ok = jobStatesSummary.states()["ok"] || 0;
- var okPercent = ok / (jobCount * 1.0);
- var runningPercent = running / (jobCount * 1.0);
- var otherPercent = 1.0 - okPercent - runningPercent;
- var jobsStr = jobCount && jobCount > 1 ? `${jobCount} jobs` : `a job`;
- return `
-
- ${jobsStr} generating a ${collectionTypeDescription}
-
-
-
-
`;
- }
- }
- },
-
// ......................................................................... misc
/** String representation */
toString: function() {
diff --git a/client/galaxy/scripts/mvc/history/history-view-edit.js b/client/galaxy/scripts/mvc/history/history-view-edit.js
index 1531e1f6a942..0084805a96e0 100644
--- a/client/galaxy/scripts/mvc/history/history-view-edit.js
+++ b/client/galaxy/scripts/mvc/history/history-view-edit.js
@@ -526,7 +526,7 @@ var HistoryViewEdit = _super.extend(
var $dropTarget = this._renderDropTarget();
this.$list().before([this._renderDropTargetHelp(), $dropTarget]);
for (var evName in dropHandlers) {
- if (dropHandlers.hasOwnProperty(evName)) {
+ if (Object.prototype.hasOwnProperty.call(dropHandlers, evName)) {
$dropTarget.get(0).addEventListener(evName, dropHandlers[evName]);
}
}
@@ -556,7 +556,7 @@ var HistoryViewEdit = _super.extend(
this.dropTarget = false;
var dropTarget = this.$(".history-drop-target").get(0);
for (var evName in this._dropHandlers) {
- if (this._dropHandlers.hasOwnProperty(evName)) {
+ if (Object.prototype.hasOwnProperty.call(this._dropHandlers, evName)) {
dropTarget.off(evName, this._dropHandlers[evName]);
}
}
diff --git a/client/galaxy/scripts/mvc/library/library-dataset-view.js b/client/galaxy/scripts/mvc/library/library-dataset-view.js
index 2c662815ddf4..44756043871e 100644
--- a/client/galaxy/scripts/mvc/library/library-dataset-view.js
+++ b/client/galaxy/scripts/mvc/library/library-dataset-view.js
@@ -117,7 +117,7 @@ var LibraryDatasetView = Backbone.View.extend({
if (container) {
const str_tags = this.model.get("tags");
if (typeof str_tags === "string") {
- this.model.set({ tags: str_tags.split(', ') });
+ this.model.set({ tags: str_tags.split(", ") });
}
const { id, model_class, tags } = this.model.attributes;
const storeKey = `${model_class}-${id}`;
diff --git a/client/galaxy/scripts/mvc/library/library-folderrow-view.js b/client/galaxy/scripts/mvc/library/library-folderrow-view.js
index 337b25cb700f..03f02c4ed3ee 100644
--- a/client/galaxy/scripts/mvc/library/library-folderrow-view.js
+++ b/client/galaxy/scripts/mvc/library/library-folderrow-view.js
@@ -81,7 +81,7 @@ var FolderRowView = Backbone.View.extend({
if (container) {
const str_tags = this.model.get("tags");
if (typeof str_tags === "string") {
- this.model.set({ tags: str_tags.split(', ') });
+ this.model.set({ tags: str_tags.split(", ") });
}
const { id, model_class, tags } = this.model.attributes;
const storeKey = `${model_class}-${id}`;
diff --git a/client/galaxy/scripts/mvc/tag.js b/client/galaxy/scripts/mvc/tag.js
deleted file mode 100644
index 6e9ad498d88d..000000000000
--- a/client/galaxy/scripts/mvc/tag.js
+++ /dev/null
@@ -1,197 +0,0 @@
-import _ from "underscore";
-import Backbone from "backbone";
-import { getAppRoot } from "onload/loadConfig";
-import { getGalaxyInstance } from "app";
-import baseMVC from "mvc/base-mvc";
-import _l from "utils/localization";
-
-// =============================================================================
-/** A view on any model that has a 'tags' attribute (a list of tag strings)
- * Incorporates the select2 jQuery plugin for tags display/editing:
- * http://ivaynberg.github.io/select2/
- */
-var TagsEditor = Backbone.View.extend(baseMVC.LoggableMixin)
- .extend(baseMVC.HiddenUntilActivatedViewMixin)
- .extend({
- tagName: "div",
- className: "tags-display",
- select_width: "100%",
- events: {},
-
- /** Set up listeners, parse options */
- initialize: function(options) {
- //console.debug( this, options );
- // only listen to the model only for changes to tags - re-render
- this.show_editor = false;
- if (options.usePrompt === false) {
- this.label = "";
- } else {
- this.label = ``;
- }
- this.workflow_mode = options.workflow_mode || false;
- if (this.workflow_mode) {
- this.events.click = "showEditor";
- this.events.keydown = "keydownHandler";
- }
- this.hiddenUntilActivated(options.$activator, options);
- },
-
- /** Build the DOM elements, call select to on the created input, and set up behaviors */
- render: function() {
- var self = this;
- if (this.workflow_mode) {
- this.$el.html(this._workflowTemplate());
- } else {
- this.$el.html(this._defaultTemplate());
- }
- this.$input().select2({
- placeholder: "Add tags",
- width: this.workflow_mode ? this.width : this.select_width,
- tags: function() {
- // initialize possible tags in the dropdown based on all the tags the user has used so far
- return self._getTagsUsed();
- }
- });
-
- this._setUpBehaviors();
- return this;
- },
-
- _hashToName: function(tag) {
- if (tag.startsWith("#")) {
- return `name:${tag.slice(1)}`;
- }
- return tag;
- },
-
- _nameToHash: function(tag) {
- if (tag.startsWith("name:")) {
- tag = `#${tag.slice(5)}`;
- }
- return tag;
- },
-
- /** @returns {String} the html text used to build the view's DOM */
- _defaultTemplate: function() {
- return [this.label, this._renderEditor()].join("");
- },
-
- _workflowTemplate: function() {
- // Shows labels by default, event handler controls whether we show tags or editor
- return [this.show_editor ? this._renderEditor() : this._renderTags()].join(" ");
- },
-
- keydownHandler: function(e) {
- switch (e.which) {
- // esc
- case 27:
- // hide the tag editor when pressing escape
- this.hideEditor();
- break;
- }
- },
-
- showEditor: function() {
- this.show_editor = true;
- this.render();
- },
-
- hideEditor: function() {
- this.show_editor = false;
- this.render();
- },
-
- _renderEditor: function() {
- // set up initial tags by adding as CSV to input vals (necc. to init select2)
- return ``;
- },
-
- _renderTags: function() {
- var tags = this.model.get("tags");
- var addButton = `${getAppRoot()}static/images/fugue/tag--plus.png`;
- var renderedArray = [];
- _.each(tags, tag => {
- tag = tag.indexOf("name:") == 0 ? tag.slice(5) : tag;
- var renderString = `${tag}`;
- renderedArray.push(renderString);
- });
- if (renderedArray.length === 0) {
- // If there are no tags to render we just show the add-tag-button
- renderedArray.push(
- ``
- );
- }
- return renderedArray.join(" ");
- },
-
- /** @returns {String} the sorted, comma-separated tags from the model */
- tagsToCSV: function() {
- var self = this;
- var tagsArray = this.model.get("tags");
- if (!_.isArray(tagsArray) || _.isEmpty(tagsArray)) {
- return "";
- }
- return tagsArray
- .map(tag => _.escape(self._nameToHash(tag)))
- .sort()
- .join(",");
- },
-
- /** @returns {jQuery} the input for this view */
- $input: function() {
- return this.$el.find("input.tags-input");
- },
-
- /** @returns {String[]} all tags used by the current user */
- _getTagsUsed: function() {
- const Galaxy = getGalaxyInstance();
- var self = this;
- return _.map(Galaxy.user.get("tags_used"), self._nameToHash);
- },
-
- /** set up any event listeners on the view's DOM (mostly handled by select2) */
- _setUpBehaviors: function() {
- var self = this;
- this.$input().on("change", event => {
- // Modify any 'hashtag' 'nametags'
- event.val = _.map(event.val, self._hashToName);
- // save the model's tags in either remove or added event
- self.model.save({ tags: event.val });
- // if it's new, add the tag to the users tags
- if (event.added) {
- //??: solve weird behavior in FF on test.galaxyproject.org where
- // event.added.text is string object: 'String{ 0="o", 1="n", 2="e" }'
- self._addNewTagToTagsUsed(`${event.added.text}`);
- }
- });
- },
-
- /** add a new tag (if not already there) to the list of all tags used by the user
- * @param {String} newTag the tag to add to the list of used
- */
- _addNewTagToTagsUsed: function(newTag) {
- const Galaxy = getGalaxyInstance();
- var tagsUsed = Galaxy.user.get("tags_used");
- if (!_.contains(tagsUsed, newTag)) {
- tagsUsed.push(newTag);
- tagsUsed.sort();
- Galaxy.user.set("tags_used", tagsUsed);
- }
- },
-
- /** shut down event listeners and remove this view's DOM */
- remove: function() {
- this.$input.off();
- this.stopListening(this.model);
- Backbone.View.prototype.remove.call(this);
- },
-
- /** string rep */
- toString: function() {
- return ["TagsEditor(", `${this.model}`, ")"].join("");
- }
- });
-
-export default {
- TagsEditor: TagsEditor
-};
diff --git a/client/galaxy/scripts/mvc/tool/tool-form-composite.js b/client/galaxy/scripts/mvc/tool/tool-form-composite.js
index 75c642e6ff63..fe1512fee99f 100644
--- a/client/galaxy/scripts/mvc/tool/tool-form-composite.js
+++ b/client/galaxy/scripts/mvc/tool/tool-form-composite.js
@@ -14,6 +14,7 @@ import ToolFormBase from "mvc/tool/tool-form-base";
import Modal from "mvc/ui/ui-modal";
import Webhooks from "mvc/webhooks";
import WorkflowIcons from "mvc/workflow/workflow-icons";
+import { mountWorkflowInvocationState } from "components/WorkflowInvocationState";
var View = Backbone.View.extend({
initialize: function(options) {
@@ -628,7 +629,7 @@ var View = Backbone.View.extend({
Galaxy.emit.debug("tool-form-composite::submit", "Submission successful.", response);
self.$el.children().hide();
self.$el.append(self._templateSuccess(response));
-
+ mountWorkflowInvocationState();
// Show Webhook if job is running
if ($.isArray(response) && response.length > 0) {
self.$el.append($("", { id: "webhook-view" }));
@@ -740,7 +741,8 @@ var View = Backbone.View.extend({
response[0].history_id
}">Switch to that history now.`;
}
- return $(`
+ let success = `
+
"
- );
- }
-});
-
-export default {
- View: WorkflowListView
-};
diff --git a/client/galaxy/scripts/polyfills.js b/client/galaxy/scripts/polyfills.js
index 0873070359e7..0ea674d30ab9 100644
--- a/client/galaxy/scripts/polyfills.js
+++ b/client/galaxy/scripts/polyfills.js
@@ -13,7 +13,6 @@ import _ from "underscore";
* So, analysis-polyfills.js, reports-polyfills.js (or analysis/polyfills)
*/
"use strict";
- /*globals window, clearTimeout */
// ------------------------------------------------------------------ polyfills
// console protection needed in some versions of IE (at this point (IE>=9), shouldn't be needed)
@@ -52,7 +51,7 @@ import _ from "underscore";
if (!window.cancelAnimationFrame)
window.cancelAnimationFrame = id => {
- clearTimeout(id);
+ window.clearTimeout(id);
};
// ------------------------------------------------------------------ can't/won't polyfill
diff --git a/client/galaxy/scripts/store/index.js b/client/galaxy/scripts/store/index.js
index 57f2183f1518..4fae6928c076 100644
--- a/client/galaxy/scripts/store/index.js
+++ b/client/galaxy/scripts/store/index.js
@@ -9,6 +9,7 @@ import createCache from "vuex-cache";
import { gridSearchStore } from "./gridSearchStore";
import { tagStore } from "./tagStore";
import { jobMetricsStore } from "./jobMetricsStore";
+import { invocationStore } from "./invocationStore";
Vue.use(Vuex);
@@ -17,6 +18,7 @@ export default new Vuex.Store({
modules: {
gridSearch: gridSearchStore,
tags: tagStore,
- jobMetrics: jobMetricsStore
+ jobMetrics: jobMetricsStore,
+ invocations: invocationStore
}
});
diff --git a/client/galaxy/scripts/store/invocationStore.js b/client/galaxy/scripts/store/invocationStore.js
new file mode 100644
index 000000000000..2c2e767705f3
--- /dev/null
+++ b/client/galaxy/scripts/store/invocationStore.js
@@ -0,0 +1,44 @@
+export const state = {
+ invocationDetailsById: {},
+ invocationJobsSummaryById: {}
+};
+
+import Vue from "vue";
+import { getAppRoot } from "onload/loadConfig";
+import axios from "axios";
+
+const getters = {
+ getInvocationById: state => invocationId => {
+ return state.invocationDetailsById[invocationId];
+ },
+ getInvocationJobsSummaryById: state => invocationId => {
+ return state.invocationJobsSummaryById[invocationId];
+ }
+};
+
+const actions = {
+ fetchInvocationForId: async ({ commit }, invocationId) => {
+ const { data } = await axios.get(`${getAppRoot()}api/invocations/${invocationId}`);
+ commit("saveInvocationForId", { invocationId, invocationData: data });
+ },
+ fetchInvocationJobsSummaryForId: async ({ commit }, invocationId) => {
+ const { data } = await axios.get(`${getAppRoot()}api/invocations/${invocationId}/jobs_summary`);
+ commit("saveInvocationJobsSummaryForId", { invocationId, jobsSummary: data });
+ }
+};
+
+const mutations = {
+ saveInvocationForId: (state, { invocationId, invocationData }) => {
+ Vue.set(state.invocationDetailsById, invocationId, invocationData);
+ },
+ saveInvocationJobsSummaryForId: (state, { invocationId, jobsSummary }) => {
+ Vue.set(state.invocationJobsSummaryById, invocationId, jobsSummary);
+ }
+};
+
+export const invocationStore = {
+ state,
+ getters,
+ actions,
+ mutations
+};
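
The module is registered without `namespaced: true` (see store/index.js above), so its getters and actions live in the root namespace. A minimal sketch of how a component could consume it (the component shape is illustrative, not taken from this patch):

    import { mapGetters, mapActions } from "vuex";

    export default {
        props: { invocationId: { type: String, required: true } },
        computed: {
            ...mapGetters(["getInvocationJobsSummaryById"]),
            jobsSummary() {
                return this.getInvocationJobsSummaryById(this.invocationId);
            }
        },
        methods: mapActions(["fetchInvocationJobsSummaryForId"]),
        created() {
            // Root-level action name works because the module is not namespaced.
            this.fetchInvocationJobsSummaryForId(this.invocationId);
        }
    };
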
diff --git a/client/galaxy/scripts/utils/ajax-queue.js b/client/galaxy/scripts/utils/ajax-queue.js
index f7b82df6c8b9..a2e3836973d1 100644
--- a/client/galaxy/scripts/utils/ajax-queue.js
+++ b/client/galaxy/scripts/utils/ajax-queue.js
@@ -137,10 +137,10 @@ class NamedAjaxQueue extends AjaxQueue {
/** add the obj.fn to the queue if obj.name hasn't been used before */
add(obj) {
- if (!(obj.hasOwnProperty("name") && obj.hasOwnProperty("fn"))) {
+ if (!(Object.prototype.hasOwnProperty.call(obj, "name") && Object.prototype.hasOwnProperty.call(obj, "fn"))) {
throw new Error(`NamedAjaxQueue.add requires an object with both "name" and "fn": ${JSON.stringify(obj)}`);
}
- if (this.names.hasOwnProperty(obj.name)) {
+ if (Object.prototype.hasOwnProperty.call(this.names, obj.name)) {
//console.warn( 'name has been used:', obj.name );
return;
}
diff --git a/client/galaxy/scripts/utils/graph.js b/client/galaxy/scripts/utils/graph.js
index 7e4f980740df..00ba79c4e7d0 100644
--- a/client/galaxy/scripts/utils/graph.js
+++ b/client/galaxy/scripts/utils/graph.js
@@ -1,8 +1,8 @@
/** deep equal of two dictionaries */
function matches(d, d2) {
for (var k in d2) {
- if (d2.hasOwnProperty(k)) {
- if (!d.hasOwnProperty(k) || d[k] !== d2[k]) {
+ if (Object.prototype.hasOwnProperty.call(d2, k)) {
+ if (!Object.prototype.hasOwnProperty.call(d, k) || d[k] !== d2[k]) {
return false;
}
}
@@ -20,7 +20,7 @@ function iterate(obj, propsOrFn) {
var returned = [];
var index = 0;
for (var key in obj) {
- if (obj.hasOwnProperty(key)) {
+ if (Object.prototype.hasOwnProperty.call(obj, key)) {
var value = obj[key];
if (fn) {
returned.push(fn.call(value, value, key, index));
@@ -313,10 +313,10 @@ Graph.prototype.read = function(data) {
return this;
}
var self = this;
- if (data.hasOwnProperty("nodes")) {
+ if (Object.prototype.hasOwnProperty.call(data, "nodes")) {
return self.readNodesAndLinks(data);
}
- if (data.hasOwnProperty("vertices")) {
+ if (Object.prototype.hasOwnProperty.call(data, "vertices")) {
return self.readVerticesAndEdges(data);
}
return self;
@@ -325,7 +325,7 @@ Graph.prototype.read = function(data) {
//TODO: the next two could be combined
/** Create the graph using a list of nodes and a list of edges (where source and target are indeces into nodes) */
Graph.prototype.readNodesAndLinks = function(data) {
- if (!(data && data.hasOwnProperty("nodes"))) {
+ if (!(data && Object.prototype.hasOwnProperty.call(data, "nodes"))) {
return this;
}
//console.debug( 'readNodesAndLinks:', data );
@@ -348,7 +348,7 @@ Graph.prototype.readNodesAndLinks = function(data) {
/** Create the graph using a list of nodes and a list of edges (where source and target are names of nodes) */
Graph.prototype.readVerticesAndEdges = function(data) {
- if (!(data && data.hasOwnProperty("vertices"))) {
+ if (!(data && Object.prototype.hasOwnProperty.call(data, "vertices"))) {
return this;
}
//console.debug( 'readVerticesAndEdges:', data );
diff --git a/client/galaxy/scripts/utils/localization.js b/client/galaxy/scripts/utils/localization.js
index cf7269f662b8..e4d6a84e4ada 100644
--- a/client/galaxy/scripts/utils/localization.js
+++ b/client/galaxy/scripts/utils/localization.js
@@ -3,7 +3,7 @@ define(["i18n!nls/locale"], function(localeStrings) {
// =============================================================================
/** Simple string replacement localization. Language data from galaxy/scripts/nls */
- if (localeStrings.hasOwnProperty("__root")) {
+ if (Object.prototype.hasOwnProperty.call(localeStrings, "__root")) {
//console.debug( 'amdi18n+webpack localization for ' + locale + ' loaded' );
const locale = sessionStorage.getItem("currentLocale");
@@ -29,7 +29,7 @@ define(["i18n!nls/locale"], function(localeStrings) {
// //TODO: conditional compile on DEBUG flag
// // cache strings that need to be localized but haven't been?
- // if( localize.cacheNonLocalized && !localeStrings.hasOwnProperty( strToLocalize ) ){
+ // if( localize.cacheNonLocalized && !Object.prototype.hasOwnProperty.call(localeStrings, strToLocalize ) ){
// // console.debug( 'localization NOT found:', strToLocalize );
// // add nonCached as hash directly to this function
// localize.nonLocalized = localize.nonLocalized || {};
diff --git a/client/galaxy/scripts/utils/metrics-logger.js b/client/galaxy/scripts/utils/metrics-logger.js
index bb00bd7c5b78..e8dfb3bac33d 100644
--- a/client/galaxy/scripts/utils/metrics-logger.js
+++ b/client/galaxy/scripts/utils/metrics-logger.js
@@ -1,4 +1,3 @@
-/*global window, jQuery, console */
/*=============================================================================
TODO:
while anon: logs saved to 'logs-null' - this will never post
@@ -27,6 +26,8 @@ TODO:
* > panel.metric( 'something weird with window', { window : window })
* !'Metrics logger could not stringify logArguments: ...'
*/
+import jQuery from "jquery";
+
function MetricsLogger(options) {
options = options || {};
var self = this;
@@ -99,8 +100,10 @@ MetricsLogger.prototype._init = function _init(options) {
var self = this;
self.options = {};
for (var k in MetricsLogger.defaultOptions) {
- if (MetricsLogger.defaultOptions.hasOwnProperty(k)) {
- self.options[k] = options.hasOwnProperty(k) ? options[k] : MetricsLogger.defaultOptions[k];
+ if (Object.prototype.hasOwnProperty.call(MetricsLogger.defaultOptions, k)) {
+ self.options[k] = Object.prototype.hasOwnProperty.call(options, k)
+ ? options[k]
+ : MetricsLogger.defaultOptions[k];
}
}
self.options.logLevel = self._parseLevel(self.options.logLevel);
@@ -140,7 +143,7 @@ MetricsLogger.prototype._parseLevel = function _parseLevel(level) {
}
if (type === "string") {
var upper = level.toUpperCase();
- if (MetricsLogger.hasOwnProperty(upper)) {
+ if (Object.prototype.hasOwnProperty.call(MetricsLogger, upper)) {
return MetricsLogger[upper];
}
}
@@ -280,7 +283,7 @@ MetricsLogger.prototype._postCache = function _postCache(options) {
MetricsLogger.prototype._delayPost = function _delayPost() {
//TODO: this won't work between pages
var self = this;
- self._waiting = setTimeout(() => {
+ self._waiting = window.setTimeout(() => {
self._waiting = null;
}, self.options.delayPostInMs);
};
diff --git a/client/galaxy/scripts/utils/safeAssign.js b/client/galaxy/scripts/utils/safeAssign.js
index b10591e7fbf6..fda9c7724fec 100644
--- a/client/galaxy/scripts/utils/safeAssign.js
+++ b/client/galaxy/scripts/utils/safeAssign.js
@@ -6,6 +6,6 @@ export function safeAssign(target, source = {}) {
source = {};
}
Object.keys(source)
- .filter(prop => target.hasOwnProperty(prop))
+ .filter(prop => Object.prototype.hasOwnProperty.call(target, prop))
.forEach(prop => (target[prop] = source[prop]));
}
diff --git a/client/galaxy/style/scss/base.scss b/client/galaxy/style/scss/base.scss
index 37df82d66c0f..545352fd8892 100644
--- a/client/galaxy/style/scss/base.scss
+++ b/client/galaxy/style/scss/base.scss
@@ -2,6 +2,7 @@
@import "~bootstrap/scss/_functions.scss";
@import "theme/blue.scss";
@import "~bootstrap/scss/bootstrap.scss";
+@import "~bootstrap-vue/src/index.scss";
// Bootstrap-related style overrides
@import "overrides.scss";
@@ -1023,6 +1024,9 @@ ul.manage-table-actions li {
.ok {
background: $state-success-bg;
}
+ .error {
+ background: $state-danger-bg;
+ }
.note {
@extend .position-absolute;
@extend .ml-2;
@@ -1377,7 +1381,6 @@ div.toolPanelLabel {
text-transform: uppercase;
}
-
div.toolSectionTitle,
div.toolTitle,
div.toolTitleNoSection {
diff --git a/client/package.json b/client/package.json
index fcc03066f946..091407be0520 100644
--- a/client/package.json
+++ b/client/package.json
@@ -20,7 +20,7 @@
"bibtex-parse-js": "^0.0.24",
"bootstrap": "4.3.1",
"bootstrap-tour": "https://github.com/sorich87/bootstrap-tour.git#65a49742e131d19f41e3f5bf63588995f7b8a9e0",
- "bootstrap-vue": "^2.0.0-rc.16",
+ "bootstrap-vue": "^2.0.1",
"d3": "3",
"decode-uri-component": "^0.2.0",
"flush-promises": "^1.0.2",
@@ -67,6 +67,7 @@
"build-charts": "webpack -p --config ../config/plugins/visualizations/charts/webpack.config.js",
"build-scatterplot": "NODE_PATH=./node_modules webpack -p --config ../config/plugins/visualizations/scatterplot/webpack.config.js",
"prettier": "prettier --write 'galaxy/style/scss/**/*.scss' 'galaxy/scripts/**/{*.js,*.vue}' '!galaxy/scripts/libs/**'",
+ "prettier-check": "prettier --check 'galaxy/style/scss/**/*.scss' 'galaxy/scripts/**/{*.js,*.vue}' '!galaxy/scripts/libs/**'",
"styleguide": "vue-styleguidist server",
"styleguide:build": "vue-styleguidist build",
"test": "npm run test-mocha && npm run test-qunit",
@@ -80,6 +81,7 @@
"@babel/plugin-syntax-dynamic-import": "^7.2.0",
"@babel/preset-env": "^7.4.2",
"amdi18n-loader": "^0.9.2",
+ "babel-eslint": "^10.0.3",
"babel-loader": "^8.0.5",
"babel-plugin-rewire": "^1.2.0",
"babel-plugin-transform-inline-environment-variables": "^0.4.3",
@@ -88,11 +90,11 @@
"css-loader": "^2.1.1",
"del": "^4.0.0",
"duplicate-package-checker-webpack-plugin": "^3.0.0",
- "eslint": "^5.15.3",
- "eslint-plugin-html": "^5.0.3",
- "eslint-plugin-import": "^2.16.0",
- "eslint-plugin-prettier": "^3.0.1",
- "eslint-plugin-vue": "^5.2.2",
+ "eslint": "^6.4.0",
+ "eslint-plugin-html": "^6.0.0",
+ "eslint-plugin-import": "^2.18.2",
+ "eslint-plugin-prettier": "^3.1.1",
+ "eslint-plugin-vue": "^5.2.3",
"expose-loader": "^0.7.5",
"file-loader": "^3.0.1",
"gulp": "^4.0.0",
diff --git a/client/yarn.lock b/client/yarn.lock
index 13bda33421f1..fad47334b755 100644
--- a/client/yarn.lock
+++ b/client/yarn.lock
@@ -16,6 +16,13 @@
dependencies:
"@babel/highlight" "7.0.0-beta.44"
+"@babel/code-frame@^7.5.5":
+ version "7.5.5"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.5.5.tgz#bc0782f6d69f7b7d49531219699b988f669a8f9d"
+ integrity sha512-27d4lZoomVyo51VegxI20xZPuSHusqbQag/ztrBC7wegWoQ1nLREPVSKSW8byhTlzTKyNE4ifaTA6lCp7JjpFw==
+ dependencies:
+ "@babel/highlight" "^7.0.0"
+
"@babel/core@^7.4.0":
version "7.4.0"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.4.0.tgz#248fd6874b7d755010bfe61f557461d4f446d9e9"
@@ -69,6 +76,17 @@
source-map "^0.5.0"
trim-right "^1.0.1"
+"@babel/generator@^7.6.0":
+ version "7.6.0"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.6.0.tgz#e2c21efbfd3293ad819a2359b448f002bfdfda56"
+ integrity sha512-Ms8Mo7YBdMMn1BYuNtKuP/z0TgEIhbcyB8HVR6PPNYp4P61lMsABiS4A3VG1qznjXVCf3r+fVHhm4efTYVsySA==
+ dependencies:
+ "@babel/types" "^7.6.0"
+ jsesc "^2.5.1"
+ lodash "^4.17.13"
+ source-map "^0.5.0"
+ trim-right "^1.0.1"
+
"@babel/helper-annotate-as-pure@^7.0.0":
version "7.0.0"
resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.0.0.tgz#323d39dd0b50e10c7c06ca7d7638e6864d8c5c32"
@@ -254,6 +272,13 @@
dependencies:
"@babel/types" "^7.4.0"
+"@babel/helper-split-export-declaration@^7.4.4":
+ version "7.4.4"
+ resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.4.tgz#ff94894a340be78f53f06af038b205c49d993677"
+ integrity sha512-Ro/XkzLf3JFITkW6b+hNxzZ1n5OQ80NvIUdmHspih1XAhtN3vPTuUFT4eQnela+2MaZ5ulH+iyP513KJrxbN7Q==
+ dependencies:
+ "@babel/types" "^7.4.4"
+
"@babel/helper-wrap-function@^7.1.0":
version "7.1.0"
resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.1.0.tgz#8cf54e9190706067f016af8f75cb3df829cc8c66"
@@ -291,6 +316,11 @@
esutils "^2.0.2"
js-tokens "^4.0.0"
+"@babel/parser@^7.0.0", "@babel/parser@^7.6.0":
+ version "7.6.0"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.6.0.tgz#3e05d0647432a8326cb28d0de03895ae5a57f39b"
+ integrity sha512-+o2q111WEx4srBs7L9eJmcwi655eD8sXniLqMB93TBK9GrNzGrxDWSjiqz2hLU0Ha8MTXFIP0yd9fNdP+m43ZQ==
+
"@babel/parser@^7.1.2", "@babel/parser@^7.1.3":
version "7.1.3"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.1.3.tgz#2c92469bac2b7fbff810b67fca07bd138b48af77"
@@ -715,6 +745,21 @@
invariant "^2.2.0"
lodash "^4.2.0"
+"@babel/traverse@^7.0.0":
+ version "7.6.0"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.6.0.tgz#389391d510f79be7ce2ddd6717be66d3fed4b516"
+ integrity sha512-93t52SaOBgml/xY74lsmt7xOR4ufYvhb5c5qiM6lu4J/dWGMAfAh6eKw4PjLes6DI6nQgearoxnFJk60YchpvQ==
+ dependencies:
+ "@babel/code-frame" "^7.5.5"
+ "@babel/generator" "^7.6.0"
+ "@babel/helper-function-name" "^7.1.0"
+ "@babel/helper-split-export-declaration" "^7.4.4"
+ "@babel/parser" "^7.6.0"
+ "@babel/types" "^7.6.0"
+ debug "^4.1.0"
+ globals "^11.1.0"
+ lodash "^4.17.13"
+
"@babel/traverse@^7.1.0":
version "7.1.4"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.1.4.tgz#f4f83b93d649b4b2c91121a9087fa2fa949ec2b4"
@@ -772,6 +817,15 @@
lodash "^4.17.11"
to-fast-properties "^2.0.0"
+"@babel/types@^7.4.4", "@babel/types@^7.6.0":
+ version "7.6.1"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.6.1.tgz#53abf3308add3ac2a2884d539151c57c4b3ac648"
+ integrity sha512-X7gdiuaCmA0uRjCmRtYJNAVCc/q+5xSgsfKJHqMN4iNLILX39677fJE1O40arPMh0TTtS9ItH67yre6c7k6t0g==
+ dependencies:
+ esutils "^2.0.2"
+ lodash "^4.17.13"
+ to-fast-properties "^2.0.0"
+
"@handsontable/vue@^2.0.0-beta1":
version "2.0.0"
resolved "https://registry.yarnpkg.com/@handsontable/vue/-/vue-2.0.0.tgz#28ae7d247a89738088abc32584562925dde18db0"
@@ -797,14 +851,14 @@
resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b"
integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==
-"@nuxt/opencollective@^0.2.1":
- version "0.2.1"
- resolved "https://registry.yarnpkg.com/@nuxt/opencollective/-/opencollective-0.2.1.tgz#8290f1220072637e575c3935733719a78ad2d056"
- integrity sha512-NP2VSUKRFGutbhWeKgIU0MnY4fmpH8UWxxwTJNPurCQ5BeWhOxp+Gp5ltO39P/Et/J2GYGb3+ALNqZJ+5cGBBw==
+"@nuxt/opencollective@^0.3.0":
+ version "0.3.0"
+ resolved "https://registry.yarnpkg.com/@nuxt/opencollective/-/opencollective-0.3.0.tgz#11d8944dcf2d526e31660bb69570be03f8fb72b7"
+ integrity sha512-Vf09BxCdj1iT2IRqVwX5snaY2WCTkvM0O4cWWSO1ThCFuc4if0Q/nNwAgCxRU0FeYHJ7DdyMUNSdswCLKlVqeg==
dependencies:
- chalk "^2.4.1"
- consola "^2.3.0"
- node-fetch "^2.3.0"
+ chalk "^2.4.2"
+ consola "^2.10.1"
+ node-fetch "^2.6.0"
"@sinonjs/formatio@3.0.0":
version "3.0.0"
@@ -1114,6 +1168,11 @@ acorn-jsx@^5.0.1:
resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.0.1.tgz#32a064fd925429216a09b141102bfdd185fae40e"
integrity sha512-HJ7CfNHrfJLlNTzIEUTj43LNWGkqpRLxm3YjAlcD0ACydk9XynzYsCBHxut+iqt+1aBXkx9UP/w/ZqMr13XIzg==
+acorn-jsx@^5.0.2:
+ version "5.0.2"
+ resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.0.2.tgz#84b68ea44b373c4f8686023a551f61a21b7c4a4f"
+ integrity sha512-tiNTrP1MP0QrChmD2DdupCr6HWSFeKVw5d/dHTu4Y7rkAkRhU/Dt7dphAfIUyxtHpl/eBVip5uTNSpQJHylpAw==
+
acorn@^3.1.0:
version "3.3.0"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-3.3.0.tgz#45e37fb39e8da3f25baee3ff5369e2bb5f22017a"
@@ -1134,11 +1193,16 @@ acorn@^6.0.2:
resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.0.2.tgz#6a459041c320ab17592c6317abbfdf4bbaa98ca4"
integrity sha512-GXmKIvbrN3TV7aVqAzVFaMW8F8wzVX7voEBRO3bDA64+EX37YSayggRJP5Xig6HYHBkWKpFg9W5gg6orklubhg==
-acorn@^6.0.5, acorn@^6.0.7, acorn@^6.1.0, acorn@^6.1.1:
+acorn@^6.0.5, acorn@^6.1.0, acorn@^6.1.1:
version "6.1.1"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.1.1.tgz#7d25ae05bb8ad1f9b699108e1094ecd7884adc1f"
integrity sha512-jPTiwtOxaHNaAPg/dmrJ/beuzLRnXtB0kQPQ8JpotKJgTB6rX6c8mlf315941pyjBSaPg8NHXS9fhP4u17DpGA==
+acorn@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.0.0.tgz#26b8d1cd9a9b700350b71c0905546f64d1284e7a"
+ integrity sha512-PaF/MduxijYYt7unVGRuds1vBC9bFxbNf+VWqhOClfdgy7RlVkQqt610ig1/yxTgsDIfW1cWDel5EBbOy3jdtQ==
+
address@1.0.3, address@^1.0.1:
version "1.0.3"
resolved "https://registry.yarnpkg.com/address/-/address-1.0.3.tgz#b5f50631f8d6cec8bd20c963963afb55e06cbce9"
@@ -1179,6 +1243,16 @@ ajv@^6.1.0:
json-schema-traverse "^0.4.1"
uri-js "^4.2.2"
+ajv@^6.10.0:
+ version "6.10.2"
+ resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.10.2.tgz#d3cea04d6b017b2894ad69040fec8b623eb4bd52"
+ integrity sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw==
+ dependencies:
+ fast-deep-equal "^2.0.1"
+ fast-json-stable-stringify "^2.0.0"
+ json-schema-traverse "^0.4.1"
+ uri-js "^4.2.2"
+
ajv@^6.9.1:
version "6.10.0"
resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.10.0.tgz#90d0d54439da587cd7e843bfb7045f50bd22bdf1"
@@ -1423,6 +1497,14 @@ array-from@^2.1.1:
resolved "https://registry.yarnpkg.com/array-from/-/array-from-2.1.1.tgz#cfe9d8c26628b9dc5aecc62a9f5d8f1f352c1195"
integrity sha1-z+nYwmYoudxa7MYqn12PHzUsEZU=
+array-includes@^3.0.3:
+ version "3.0.3"
+ resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.0.3.tgz#184b48f62d92d7452bb31b323165c7f8bd02266d"
+ integrity sha1-GEtI9i2S10UrsxsyMWXH+L0CJm0=
+ dependencies:
+ define-properties "^1.1.2"
+ es-abstract "^1.7.0"
+
array-initial@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/array-initial/-/array-initial-1.1.0.tgz#2fa74b26739371c3947bd7a7adc73be334b3d795"
@@ -1759,6 +1841,18 @@ babel-core@^6.26.0, babel-core@^6.3.21:
slash "^1.0.0"
source-map "^0.5.7"
+babel-eslint@^10.0.3:
+ version "10.0.3"
+ resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.0.3.tgz#81a2c669be0f205e19462fed2482d33e4687a88a"
+ integrity sha512-z3U7eMY6r/3f3/JB9mTsLjyxrv0Yb1zb8PCWCLpguxfCzBIZUwy23R1t/XKewP+8mEN2Ck8Dtr4q20z6ce6SoA==
+ dependencies:
+ "@babel/code-frame" "^7.0.0"
+ "@babel/parser" "^7.0.0"
+ "@babel/traverse" "^7.0.0"
+ "@babel/types" "^7.0.0"
+ eslint-visitor-keys "^1.0.0"
+ resolve "^1.12.0"
+
babel-eslint@^8.2.3:
version "8.2.6"
resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-8.2.6.tgz#6270d0c73205628067c0f7ae1693a9e797acefd9"
@@ -2586,17 +2680,18 @@ boolbase@^1.0.0, boolbase@~1.0.0:
jquery "^3.0.0"
popper.js "^1.12.3"
-bootstrap-vue@^2.0.0-rc.16:
- version "2.0.0-rc.16"
- resolved "https://registry.yarnpkg.com/bootstrap-vue/-/bootstrap-vue-2.0.0-rc.16.tgz#7302313ad4c5e29e88b9009e26fb2c9b6d81a1fb"
- integrity sha512-fhiyqG6i3ITF7fAzAjMexikGUgBZ/GTKQi0mCK48FacB5tiq2KUXE0Qilb/CW090PkqEw2W+7AP2/k5/dAa/MQ==
+bootstrap-vue@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/bootstrap-vue/-/bootstrap-vue-2.0.1.tgz#970d3de2fe2064e123507c59959e328afd66e8a7"
+ integrity sha512-mb2MP0f3KNRi/D8W0cItX2joM+XbwciFx1JKjtdKzt4uIXNx2SpOwF/wle26CszB1S35ArXJ/ZVN0dX8ry4yng==
dependencies:
- "@nuxt/opencollective" "^0.2.1"
- bootstrap "^4.3.1"
- popper.js "^1.14.7"
- vue-functional-data-merge "^2.0.7"
+ "@nuxt/opencollective" "^0.3.0"
+ bootstrap ">=4.3.1 <5.0.0"
+ popper.js "^1.15.0"
+ portal-vue "^2.1.6"
+ vue-functional-data-merge "^3.1.0"
-bootstrap@4.3.1, bootstrap@^4.3.1:
+bootstrap@4.3.1, "bootstrap@>=4.3.1 <5.0.0":
version "4.3.1"
resolved "https://registry.yarnpkg.com/bootstrap/-/bootstrap-4.3.1.tgz#280ca8f610504d99d7b6b4bfc4b68cec601704ac"
integrity sha512-rXqOmH1VilAt2DyPzluTi2blhk17bO7ef+zLLPlWvG494pDxcM234pJ8wTc/6R40UWizAIIMgxjvxZg5kmsbag==
@@ -3651,10 +3746,10 @@ connect@^3.6.0:
parseurl "~1.3.2"
utils-merge "1.0.1"
-consola@^2.3.0:
- version "2.5.6"
- resolved "https://registry.yarnpkg.com/consola/-/consola-2.5.6.tgz#5ce14dbaf6f5b589c8a258ef80ed97b752fa57d5"
- integrity sha512-DN0j6ewiNWkT09G3ZoyyzN3pSYrjxWcx49+mHu+oDI5dvW5vzmyuzYsqGS79+yQserH9ymJQbGzeqUejfssr8w==
+consola@^2.10.1:
+ version "2.10.1"
+ resolved "https://registry.yarnpkg.com/consola/-/consola-2.10.1.tgz#4693edba714677c878d520e4c7e4f69306b4b927"
+ integrity sha512-4sxpH6SGFYLADfUip4vuY65f/gEogrzJoniVhNUYkJHtng0l8ZjnDCqxxrSVRHOHwKxsy8Vm5ONZh1wOR3/l/w==
console-browserify@^1.1.0:
version "1.1.0"
@@ -4244,7 +4339,7 @@ defaults@^1.0.3:
dependencies:
clone "^1.0.2"
-define-properties@^1.1.2:
+define-properties@^1.1.2, define-properties@^1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1"
integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==
@@ -4474,11 +4569,16 @@ domain-browser@^1.1.1:
resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda"
integrity sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==
-domelementtype@1, domelementtype@^1.3.0:
+domelementtype@1:
version "1.3.0"
resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
integrity sha1-sXrtguirWeUt2cGbF1bg/BhyBMI=
+domelementtype@^1.3.1:
+ version "1.3.1"
+ resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f"
+ integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==
+
domelementtype@~1.1.1:
version "1.1.3"
resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
@@ -4715,6 +4815,22 @@ error-ex@^1.2.0, error-ex@^1.3.1:
dependencies:
is-arrayish "^0.2.1"
+es-abstract@^1.12.0, es-abstract@^1.7.0:
+ version "1.14.2"
+ resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.14.2.tgz#7ce108fad83068c8783c3cdf62e504e084d8c497"
+ integrity sha512-DgoQmbpFNOofkjJtKwr87Ma5EW4Dc8fWhD0R+ndq7Oc456ivUfGOOP6oAZTTKl5/CcNMP+EN+e3/iUzgE0veZg==
+ dependencies:
+ es-to-primitive "^1.2.0"
+ function-bind "^1.1.1"
+ has "^1.0.3"
+ has-symbols "^1.0.0"
+ is-callable "^1.1.4"
+ is-regex "^1.0.4"
+ object-inspect "^1.6.0"
+ object-keys "^1.1.1"
+ string.prototype.trimleft "^2.0.0"
+ string.prototype.trimright "^2.0.0"
+
es-abstract@^1.5.1, es-abstract@^1.6.1:
version "1.12.0"
resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.12.0.tgz#9dbbdd27c6856f0001421ca18782d786bf8a6165"
@@ -4726,7 +4842,7 @@ es-abstract@^1.5.1, es-abstract@^1.6.1:
is-callable "^1.1.3"
is-regex "^1.0.4"
-es-to-primitive@^1.1.1:
+es-to-primitive@^1.1.1, es-to-primitive@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.0.tgz#edf72478033456e8dda8ef09e00ad9650707f377"
integrity sha512-qZryBOJjV//LaxLTV6UC//WewneB3LcXOL9NP++ozKVXsIIIpm/2c13UDiD9Jp2eThsecw9m3jPqDwTyobcdbg==
@@ -4816,48 +4932,49 @@ eslint-import-resolver-node@^0.3.2:
debug "^2.6.9"
resolve "^1.5.0"
-eslint-module-utils@^2.3.0:
- version "2.3.0"
- resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.3.0.tgz#546178dab5e046c8b562bbb50705e2456d7bda49"
- integrity sha512-lmDJgeOOjk8hObTysjqH7wyMi+nsHwwvfBykwfhjR1LNdd7C2uFJBvx4OpWYpXOw4df1yE1cDEVd1yLHitk34w==
+eslint-module-utils@^2.4.0:
+ version "2.4.1"
+ resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.4.1.tgz#7b4675875bf96b0dbf1b21977456e5bb1f5e018c"
+ integrity sha512-H6DOj+ejw7Tesdgbfs4jeS4YMFrT8uI8xwd1gtQqXssaR0EQ26L+2O/w6wkYFy2MymON0fTwHmXBvvfLNZVZEw==
dependencies:
debug "^2.6.8"
pkg-dir "^2.0.0"
-eslint-plugin-html@^5.0.3:
- version "5.0.3"
- resolved "https://registry.yarnpkg.com/eslint-plugin-html/-/eslint-plugin-html-5.0.3.tgz#3db133995e49a73596f6a473c16a1b83634deffd"
- integrity sha512-46ruAnp3jVQP/5Bi5eEIOooscjUTPFU3vxCxHe/OG6ORdM7Xv5c25/Nz9fAbHklzCpiXuIiH4/mV/XBkm7MINw==
+eslint-plugin-html@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-html/-/eslint-plugin-html-6.0.0.tgz#28e5c3e71e6f612e07e73d7c215e469766628c13"
+ integrity sha512-PQcGippOHS+HTbQCStmH5MY1BF2MaU8qW/+Mvo/8xTa/ioeMXdSP+IiaBw2+nh0KEMfYQKuTz1Zo+vHynjwhbg==
dependencies:
- htmlparser2 "^3.10.0"
+ htmlparser2 "^3.10.1"
-eslint-plugin-import@^2.16.0:
- version "2.16.0"
- resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.16.0.tgz#97ac3e75d0791c4fac0e15ef388510217be7f66f"
- integrity sha512-z6oqWlf1x5GkHIFgrSvtmudnqM6Q60KM4KvpWi5ubonMjycLjndvd5+8VAZIsTlHC03djdgJuyKG6XO577px6A==
+eslint-plugin-import@^2.18.2:
+ version "2.18.2"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.18.2.tgz#02f1180b90b077b33d447a17a2326ceb400aceb6"
+ integrity sha512-5ohpsHAiUBRNaBWAF08izwUGlbrJoJJ+W9/TBwsGoR1MnlgfwMIKrFeSjWbt6moabiXW9xNvtFz+97KHRfI4HQ==
dependencies:
+ array-includes "^3.0.3"
contains-path "^0.1.0"
debug "^2.6.9"
doctrine "1.5.0"
eslint-import-resolver-node "^0.3.2"
- eslint-module-utils "^2.3.0"
+ eslint-module-utils "^2.4.0"
has "^1.0.3"
- lodash "^4.17.11"
minimatch "^3.0.4"
+ object.values "^1.1.0"
read-pkg-up "^2.0.0"
- resolve "^1.9.0"
+ resolve "^1.11.0"
-eslint-plugin-prettier@^3.0.1:
- version "3.0.1"
- resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-3.0.1.tgz#19d521e3981f69dd6d14f64aec8c6a6ac6eb0b0d"
- integrity sha512-/PMttrarPAY78PLvV3xfWibMOdMDl57hmlQ2XqFeA37wd+CJ7WSxV7txqjVPHi/AAFKd2lX0ZqfsOc/i5yFCSQ==
+eslint-plugin-prettier@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-3.1.1.tgz#507b8562410d02a03f0ddc949c616f877852f2ba"
+ integrity sha512-A+TZuHZ0KU0cnn56/9mfR7/KjUJ9QNVXUhwvRFSR7PGPe0zQR6PTkmyqg1AtUUEOzTqeRsUwyKFh0oVZKVCrtA==
dependencies:
prettier-linter-helpers "^1.0.0"
-eslint-plugin-vue@^5.2.2:
- version "5.2.2"
- resolved "https://registry.yarnpkg.com/eslint-plugin-vue/-/eslint-plugin-vue-5.2.2.tgz#86601823b7721b70bc92d54f1728cfc03b36283c"
- integrity sha512-CtGWH7IB0DA6BZOwcV9w9q3Ri6Yuo8qMjx05SmOGJ6X6E0Yo3y9E/gQ5tuNxg2dEt30tRnBoFTbvtmW9iEoyHA==
+eslint-plugin-vue@^5.2.3:
+ version "5.2.3"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-vue/-/eslint-plugin-vue-5.2.3.tgz#3ee7597d823b5478804b2feba9863b1b74273961"
+ integrity sha512-mGwMqbbJf0+VvpGR5Lllq0PMxvTdrZ/ZPjmhkacrCHbubJeJOt+T6E3HUzAifa2Mxi7RSdJfC9HFpOeSYVMMIw==
dependencies:
vue-eslint-parser "^5.0.0"
@@ -4877,65 +4994,73 @@ eslint-scope@^4.0.0:
esrecurse "^4.1.0"
estraverse "^4.1.1"
-eslint-scope@^4.0.3:
- version "4.0.3"
- resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848"
- integrity sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==
+eslint-scope@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.0.0.tgz#e87c8887c73e8d1ec84f1ca591645c358bfc8fb9"
+ integrity sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==
dependencies:
esrecurse "^4.1.0"
estraverse "^4.1.1"
-eslint-utils@^1.3.1:
- version "1.3.1"
- resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-1.3.1.tgz#9a851ba89ee7c460346f97cf8939c7298827e512"
- integrity sha512-Z7YjnIldX+2XMcjr7ZkgEsOj/bREONV60qYeB/bjMAqqqZ4zxKyWX+BOUkdmRmA9riiIPVvo5x86m5elviOk0Q==
+eslint-utils@^1.4.2:
+ version "1.4.2"
+ resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-1.4.2.tgz#166a5180ef6ab7eb462f162fd0e6f2463d7309ab"
+ integrity sha512-eAZS2sEUMlIeCjBeubdj45dmBHQwPHWyBcT1VSYB7o9x9WRRqKxyUoiXlRjyAwzN7YEzHJlYg0NmzDRWx6GP4Q==
+ dependencies:
+ eslint-visitor-keys "^1.0.0"
eslint-visitor-keys@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz#3f3180fb2e291017716acb4c9d6d5b5c34a6a81d"
integrity sha512-qzm/XxIbxm/FHyH341ZrbnMUpe+5Bocte9xkmFMzPMjRaZMcXww+MpBptFvtU+79L362nqiLhekCxCxDPaUMBQ==
-eslint@^5.15.3:
- version "5.15.3"
- resolved "https://registry.yarnpkg.com/eslint/-/eslint-5.15.3.tgz#c79c3909dc8a7fa3714fb340c11e30fd2526b8b5"
- integrity sha512-vMGi0PjCHSokZxE0NLp2VneGw5sio7SSiDNgIUn2tC0XkWJRNOIoHIg3CliLVfXnJsiHxGAYrkw0PieAu8+KYQ==
+eslint-visitor-keys@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz#e2a82cea84ff246ad6fb57f9bde5b46621459ec2"
+ integrity sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==
+
+eslint@^6.4.0:
+ version "6.4.0"
+ resolved "https://registry.yarnpkg.com/eslint/-/eslint-6.4.0.tgz#5aa9227c3fbe921982b2eda94ba0d7fae858611a"
+ integrity sha512-WTVEzK3lSFoXUovDHEbkJqCVPEPwbhCq4trDktNI6ygs7aO41d4cDT0JFAT5MivzZeVLWlg7vHL+bgrQv/t3vA==
dependencies:
"@babel/code-frame" "^7.0.0"
- ajv "^6.9.1"
+ ajv "^6.10.0"
chalk "^2.1.0"
cross-spawn "^6.0.5"
debug "^4.0.1"
doctrine "^3.0.0"
- eslint-scope "^4.0.3"
- eslint-utils "^1.3.1"
- eslint-visitor-keys "^1.0.0"
- espree "^5.0.1"
+ eslint-scope "^5.0.0"
+ eslint-utils "^1.4.2"
+ eslint-visitor-keys "^1.1.0"
+ espree "^6.1.1"
esquery "^1.0.1"
esutils "^2.0.2"
file-entry-cache "^5.0.1"
functional-red-black-tree "^1.0.1"
- glob "^7.1.2"
+ glob-parent "^5.0.0"
globals "^11.7.0"
ignore "^4.0.6"
import-fresh "^3.0.0"
imurmurhash "^0.1.4"
- inquirer "^6.2.2"
- js-yaml "^3.12.0"
+ inquirer "^6.4.1"
+ is-glob "^4.0.0"
+ js-yaml "^3.13.1"
json-stable-stringify-without-jsonify "^1.0.1"
levn "^0.3.0"
- lodash "^4.17.11"
+ lodash "^4.17.14"
minimatch "^3.0.4"
mkdirp "^0.5.1"
natural-compare "^1.4.0"
optionator "^0.8.2"
- path-is-inside "^1.0.2"
progress "^2.0.0"
regexpp "^2.0.1"
- semver "^5.5.1"
- strip-ansi "^4.0.0"
- strip-json-comments "^2.0.1"
+ semver "^6.1.2"
+ strip-ansi "^5.2.0"
+ strip-json-comments "^3.0.1"
table "^5.2.3"
text-table "^0.2.0"
+ v8-compile-cache "^2.0.3"
espower-location-detector@^1.0.0:
version "1.0.0"
@@ -4956,14 +5081,14 @@ espree@^4.1.0:
acorn-jsx "^5.0.0"
eslint-visitor-keys "^1.0.0"
-espree@^5.0.1:
- version "5.0.1"
- resolved "https://registry.yarnpkg.com/espree/-/espree-5.0.1.tgz#5d6526fa4fc7f0788a5cf75b15f30323e2f81f7a"
- integrity sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A==
+espree@^6.1.1:
+ version "6.1.1"
+ resolved "https://registry.yarnpkg.com/espree/-/espree-6.1.1.tgz#7f80e5f7257fc47db450022d723e356daeb1e5de"
+ integrity sha512-EYbr8XZUhWbYCqQRW0duU5LxzL5bETN6AjKBGy1302qqzPaCH10QbRg3Wvco79Z8x9WbiE8HYB4e75xl6qUYvQ==
dependencies:
- acorn "^6.0.7"
- acorn-jsx "^5.0.0"
- eslint-visitor-keys "^1.0.0"
+ acorn "^7.0.0"
+ acorn-jsx "^5.0.2"
+ eslint-visitor-keys "^1.1.0"
esprima@^2.1.0:
version "2.7.3"
@@ -5824,6 +5949,13 @@ glob-parent@^3.1.0:
is-glob "^3.1.0"
path-dirname "^1.0.0"
+glob-parent@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.0.0.tgz#1dc99f0f39b006d3e92c2c284068382f0c20e954"
+ integrity sha512-Z2RwiujPRGluePM6j699ktJYxmPpJKCfpGA13jz2hmFZC7gKetzrWvg5KN3+OsIFmydGyZ1AVwERCq1w/ZZwRg==
+ dependencies:
+ is-glob "^4.0.1"
+
glob-stream@^6.1.0:
version "6.1.0"
resolved "https://registry.yarnpkg.com/glob-stream/-/glob-stream-6.1.0.tgz#7045c99413b3eb94888d83ab46d0b404cc7bdde4"
@@ -6371,17 +6503,17 @@ html-entities@^1.2.0:
resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.2.1.tgz#0df29351f0721163515dfb9e5543e5f6eed5162f"
integrity sha1-DfKTUfByEWNRXfueVUPl9u7VFi8=
-htmlparser2@^3.10.0:
- version "3.10.0"
- resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.0.tgz#5f5e422dcf6119c0d983ed36260ce9ded0bee464"
- integrity sha512-J1nEUGv+MkXS0weHNWVKJJ+UrLfePxRWpN3C9bEi9fLxL2+ggW94DQvgYVXsaT30PGwYRIZKNZXuyMhp3Di4bQ==
+htmlparser2@^3.10.1:
+ version "3.10.1"
+ resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.1.tgz#bd679dc3f59897b6a34bb10749c855bb53a9392f"
+ integrity sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==
dependencies:
- domelementtype "^1.3.0"
+ domelementtype "^1.3.1"
domhandler "^2.3.0"
domutils "^1.5.1"
entities "^1.1.1"
inherits "^2.0.1"
- readable-stream "^3.0.6"
+ readable-stream "^3.1.1"
http-deceiver@^1.2.7:
version "1.2.7"
@@ -6631,10 +6763,10 @@ inquirer@6.2.1:
strip-ansi "^5.0.0"
through "^2.3.6"
-inquirer@^6.2.2:
- version "6.2.2"
- resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.2.2.tgz#46941176f65c9eb20804627149b743a218f25406"
- integrity sha512-Z2rREiXA6cHRR9KBOarR3WuLlFzlIfAEIiB45ll5SSadMg7WqOh1MKEjjndfuH5ewXdixWCxqnVfGOQzPeiztA==
+inquirer@^6.4.1:
+ version "6.5.2"
+ resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.5.2.tgz#ad50942375d036d327ff528c08bd5fab089928ca"
+ integrity sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==
dependencies:
ansi-escapes "^3.2.0"
chalk "^2.4.2"
@@ -6642,12 +6774,12 @@ inquirer@^6.2.2:
cli-width "^2.0.0"
external-editor "^3.0.3"
figures "^2.0.0"
- lodash "^4.17.11"
+ lodash "^4.17.12"
mute-stream "0.0.7"
run-async "^2.2.0"
rxjs "^6.4.0"
string-width "^2.1.0"
- strip-ansi "^5.0.0"
+ strip-ansi "^5.1.0"
through "^2.3.6"
internal-ip@^4.2.0:
@@ -6947,6 +7079,13 @@ is-glob@^4.0.0:
dependencies:
is-extglob "^2.1.1"
+is-glob@^4.0.1:
+ version "4.0.1"
+ resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc"
+ integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==
+ dependencies:
+ is-extglob "^2.1.1"
+
is-hexadecimal@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.2.tgz#b6e710d7d07bb66b98cb8cece5c9b4921deeb835"
@@ -7292,6 +7431,14 @@ js-yaml@^3.12.0, js-yaml@^3.5.2, js-yaml@^3.9.0:
argparse "^1.0.7"
esprima "^4.0.0"
+js-yaml@^3.13.1:
+ version "3.13.1"
+ resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847"
+ integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==
+ dependencies:
+ argparse "^1.0.7"
+ esprima "^4.0.0"
+
jsbn@~0.1.0:
version "0.1.1"
resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
@@ -7811,6 +7958,11 @@ lodash@^4.0.0, lodash@^4.0.1, lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.4,
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d"
integrity sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==
+lodash@^4.17.12, lodash@^4.17.13, lodash@^4.17.14:
+ version "4.17.15"
+ resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548"
+ integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==
+
log-symbols@^2.1.0, log-symbols@^2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-2.2.0.tgz#5740e1c5d6f0dfda4ad9323b5332107ef6b4c40a"
@@ -8512,10 +8664,10 @@ node-dir@^0.1.10:
dependencies:
minimatch "^3.0.2"
-node-fetch@^2.3.0:
- version "2.3.0"
- resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.3.0.tgz#1a1d940bbfb916a1d3e0219f037e89e71f8c5fa5"
- integrity sha512-MOd8pV3fxENbryESLgVIeaGKrdl+uaYhCSSVkjeOb/31/njTpcis5aWfdqgNlHIrKOLRbMnfPINPOML2CIFeXA==
+node-fetch@^2.6.0:
+ version "2.6.0"
+ resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.0.tgz#e633456386d4aa55863f676a7ab0daa8fdecb0fd"
+ integrity sha512-8dG4H5ujfvFiqDmVu9fQ5bOHUC15JMjMY/Zumv26oOvvVJjM67KF8koCWIabKQ1GJIa9r2mMZscBq/TbdOcmNA==
node-forge@0.7.5:
version "0.7.5"
@@ -8764,6 +8916,11 @@ object-copy@^0.1.0:
define-property "^0.2.5"
kind-of "^3.0.3"
+object-inspect@^1.6.0:
+ version "1.6.0"
+ resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.6.0.tgz#c70b6cbf72f274aab4c34c0c82f5167bf82cf15b"
+ integrity sha512-GJzfBZ6DgDAmnuaM3104jR4s1Myxr3Y3zfIyN4z3UdqN69oSRacNK8UhnobDdC+7J2AHCjGwxQubNJfE70SXXQ==
+
object-keys@^1.0.11:
version "1.1.0"
resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.0.tgz#11bd22348dd2e096a045ab06f6c85bcc340fa032"
@@ -8774,6 +8931,11 @@ object-keys@^1.0.12:
resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2"
integrity sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==
+object-keys@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
+ integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
+
object-visit@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb"
@@ -8850,6 +9012,16 @@ object.values@^1.0.4:
function-bind "^1.1.0"
has "^1.0.1"
+object.values@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.0.tgz#bf6810ef5da3e5325790eaaa2be213ea84624da9"
+ integrity sha512-8mf0nKLAoFX6VlNVdhGj31SVYpaNFtUnuoOXWyFEstsWRgU837AK+JYM0iAxwkSzGRbwn8cbFmgbyxj1j4VbXg==
+ dependencies:
+ define-properties "^1.1.3"
+ es-abstract "^1.12.0"
+ function-bind "^1.1.1"
+ has "^1.0.3"
+
observable-to-promise@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/observable-to-promise/-/observable-to-promise-0.4.0.tgz#28afe71645308f2d41d71f47ad3fece1a377e52b"
@@ -9259,7 +9431,7 @@ path-is-absolute@^1.0.0, path-is-absolute@^1.0.1:
resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18=
-path-is-inside@^1.0.1, path-is-inside@^1.0.2:
+path-is-inside@^1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"
integrity sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=
@@ -9473,6 +9645,16 @@ popper.js@^1.14.7:
resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.14.7.tgz#e31ec06cfac6a97a53280c3e55e4e0c860e7738e"
integrity sha512-4q1hNvoUre/8srWsH7hnoSJ5xVmIL4qgz+s4qf2TnJIMyZFUFMGH+9vE7mXynAlHSZ/NdTmmow86muD0myUkVQ==
+popper.js@^1.15.0:
+ version "1.15.0"
+ resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.15.0.tgz#5560b99bbad7647e9faa475c6b8056621f5a4ff2"
+ integrity sha512-w010cY1oCUmI+9KwwlWki+r5jxKfTFDVoadl7MSrIujHU5MJ5OR6HTDj6Xo8aoR/QsA56x8jKjA59qGH4ELtrA==
+
+portal-vue@^2.1.6:
+ version "2.1.6"
+ resolved "https://registry.yarnpkg.com/portal-vue/-/portal-vue-2.1.6.tgz#a7d4790b14a79af7fd159a60ec88c30cddc6c639"
+ integrity sha512-lvCF85D4e8whd0nN32D8FqKwwkk7nYUI3Ku8UAEx4Z1reomu75dv5evRUTZNaj1EalxxWNXiNl0EHRq36fG8WA==
+
portfinder@^1.0.9:
version "1.0.18"
resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.18.tgz#cf1106ff336fd4329b7ce32fda7d17d62c6bcf37"
@@ -10644,6 +10826,15 @@ readable-stream@^3.0.6:
string_decoder "^1.1.1"
util-deprecate "^1.0.1"
+readable-stream@^3.1.1:
+ version "3.4.0"
+ resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.4.0.tgz#a51c26754658e0a3c21dbf59163bd45ba6f447fc"
+ integrity sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==
+ dependencies:
+ inherits "^2.0.3"
+ string_decoder "^1.1.1"
+ util-deprecate "^1.0.1"
+
readable-stream@~1.0.2:
version "1.0.34"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c"
@@ -11101,7 +11292,14 @@ resolve@^1.1.6, resolve@^1.1.7, resolve@^1.3.2:
dependencies:
path-parse "^1.0.5"
-resolve@^1.4.0, resolve@^1.9.0:
+resolve@^1.11.0, resolve@^1.12.0:
+ version "1.12.0"
+ resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.12.0.tgz#3fc644a35c84a48554609ff26ec52b66fa577df6"
+ integrity sha512-B/dOmuoAik5bKcD6s6nXDCjzUKnaDvdkRyAk6rsmsKLipWj4797iothd7jmmUhWTfinVMU+wc56rYKsit2Qy4w==
+ dependencies:
+ path-parse "^1.0.6"
+
+resolve@^1.4.0:
version "1.10.0"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.10.0.tgz#3bdaaeaf45cc07f375656dfd2e54ed0810b101ba"
integrity sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg==
@@ -11320,11 +11518,16 @@ semver-greatest-satisfied-range@^1.1.0:
dependencies:
sver-compat "^1.5.0"
-"semver@2 || 3 || 4 || 5", semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0:
+"semver@2 || 3 || 4 || 5", semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.6.0:
version "5.6.0"
resolved "https://registry.yarnpkg.com/semver/-/semver-5.6.0.tgz#7e74256fbaa49c75aa7c7a205cc22799cac80004"
integrity sha512-RS9R6R35NYgQn++fkDWaOmqGoj4Ek9gGs+DPxNUZKuwE183xjJroKvyo1IzVFeXvUrvmALy6FWD5xrdJT25gMg==
+semver@^6.1.2:
+ version "6.3.0"
+ resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d"
+ integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==
+
semver@~4.3.3:
version "4.3.6"
resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da"
@@ -11903,6 +12106,22 @@ string-width@^3.0.0:
is-fullwidth-code-point "^2.0.0"
strip-ansi "^5.1.0"
+string.prototype.trimleft@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/string.prototype.trimleft/-/string.prototype.trimleft-2.1.0.tgz#6cc47f0d7eb8d62b0f3701611715a3954591d634"
+ integrity sha512-FJ6b7EgdKxxbDxc79cOlok6Afd++TTs5szo+zJTUyow3ycrRfJVE2pq3vcN53XexvKZu/DJMDfeI/qMiZTrjTw==
+ dependencies:
+ define-properties "^1.1.3"
+ function-bind "^1.1.1"
+
+string.prototype.trimright@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/string.prototype.trimright/-/string.prototype.trimright-2.1.0.tgz#669d164be9df9b6f7559fa8e89945b168a5a6c58"
+ integrity sha512-fXZTSV55dNBwv16uw+hh5jkghxSnc5oHq+5K/gXgizHwAvMetdAJlHqqoFC1FSDVPYWLkAKl2cxpUT41sV7nSg==
+ dependencies:
+ define-properties "^1.1.3"
+ function-bind "^1.1.1"
+
string_decoder@^1.0.0, string_decoder@~1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8"
@@ -11978,6 +12197,13 @@ strip-ansi@^5.1.0:
dependencies:
ansi-regex "^4.1.0"
+strip-ansi@^5.2.0:
+ version "5.2.0"
+ resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae"
+ integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==
+ dependencies:
+ ansi-regex "^4.1.0"
+
strip-ansi@~0.1.0:
version "0.1.1"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"
@@ -12007,7 +12233,12 @@ strip-indent@^1.0.1:
dependencies:
get-stdin "^4.0.1"
-strip-json-comments@^2.0.1, strip-json-comments@~2.0.1:
+strip-json-comments@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.0.1.tgz#85713975a91fb87bf1b305cca77395e40d2a64a7"
+ integrity sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==
+
+strip-json-comments@~2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo=
@@ -12808,6 +13039,11 @@ v8-compile-cache@^2.0.2:
resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.0.2.tgz#a428b28bb26790734c4fc8bc9fa106fccebf6a6c"
integrity sha512-1wFuMUIM16MDJRCrpbpuEPTUGmM5QMUg0cr3KFwra2XgOgFcPGDQHDh3CszSCD2Zewc/dh/pamNEW8CbfDebUw==
+v8-compile-cache@^2.0.3:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz#e14de37b31a6d194f5690d67efc4e7f6fc6ab30e"
+ integrity sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==
+
v8flags@^3.0.1:
version "3.1.2"
resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-3.1.2.tgz#fc5cd0c227428181e6c29b2992e4f8f1da5e0c9f"
@@ -12968,10 +13204,10 @@ vue-eslint-parser@^5.0.0:
esquery "^1.0.1"
lodash "^4.17.11"
-vue-functional-data-merge@^2.0.7:
- version "2.0.7"
- resolved "https://registry.yarnpkg.com/vue-functional-data-merge/-/vue-functional-data-merge-2.0.7.tgz#bdee655181eacdcb1f96ce95a4cc14e75313d1da"
- integrity sha512-pvLc+H+x2prwBj/uSEIITyxjz/7ZUVVK8uYbrYMmhDvMXnzh9OvQvVEwcOSBQjsubd4Eq41/CSJaWzy4hemMNQ==
+vue-functional-data-merge@^3.1.0:
+ version "3.1.0"
+ resolved "https://registry.yarnpkg.com/vue-functional-data-merge/-/vue-functional-data-merge-3.1.0.tgz#08a7797583b7f35680587f8a1d51d729aa1dc657"
+ integrity sha512-leT4kdJVQyeZNY1kmnS1xiUlQ9z1B/kdBFCILIjYYQDqZgLqCLa0UhjSSeRX6c3mUe6U5qYeM8LrEqkHJ1B4LA==
vue-hot-reload-api@^2.3.0:
version "2.3.1"
diff --git a/config/plugins/interactive_environments/bam_iobio/config/bam_iobio.ini.sample b/config/plugins/interactive_environments/bam_iobio/config/bam_iobio.ini.sample
index 8b8fd9b41f80..534033e0e778 100644
--- a/config/plugins/interactive_environments/bam_iobio/config/bam_iobio.ini.sample
+++ b/config/plugins/interactive_environments/bam_iobio/config/bam_iobio.ini.sample
@@ -45,11 +45,3 @@ image = qiaoy/iobio-bundle.bam-iobio:1.0-ondemand
# which ports it needs to connect. With this option you can specify the port number
# inside your container to which Galaxy should connect the UI.
#docker_connect_port = None
-
-# To run containers in Docker Swarm mode on (an existing swarm), set the
-# following option to True *and*:
-# - set docker_connect_port above. For qiaoy/iobio-bundle.bam-iobio the port
-# should most likely be 8000.
-# - If command_inject is uncommented and includes `--sig-proxy`, that option should
-# be removed.
-#swarm_mode = False
diff --git a/config/plugins/interactive_environments/jupyter/config/jupyter.ini.sample b/config/plugins/interactive_environments/jupyter/config/jupyter.ini.sample
index e632f553ec1a..becd23834817 100644
--- a/config/plugins/interactive_environments/jupyter/config/jupyter.ini.sample
+++ b/config/plugins/interactive_environments/jupyter/config/jupyter.ini.sample
@@ -48,11 +48,3 @@
# Set the following value to false if Docker volumes between Galaxy server and Docker
# container cannot or should not be used.
#use_volumes = True
-
-# To run containers in Docker Swarm mode on (an existing swarm), set the
-# following option to True *and*:
-# - set docker_connect_port above. For Jupyter the # port should most likely be
-# 8888.
-# - If command_inject is uncommented and includes `--sig-proxy`, that option should
-# be removed.
-#swarm_mode = False
diff --git a/config/plugins/interactive_environments/neo/config/neo.ini.sample b/config/plugins/interactive_environments/neo/config/neo.ini.sample
index 0e84731e033d..a03698fd72fb 100644
--- a/config/plugins/interactive_environments/neo/config/neo.ini.sample
+++ b/config/plugins/interactive_environments/neo/config/neo.ini.sample
@@ -45,11 +45,3 @@ command_inject = --sig-proxy=true -e DEBUG=false -e DEFAULT_CONTAINER_RUNTIME=12
# which ports it needs to connect. With this option you can specify the port number
# inside your container to which Galaxy should connect the UI.
#docker_connect_port = None
-
-# To run containers in Docker Swarm mode on (an existing swarm), set the
-# following option to True *and*:
-# - set docker_connect_port above. For quay.io/sanbi-sa/neo_ie:3.1.9 the port
-# should most likely be 80.
-# - If command_inject is uncommented and includes `--sig-proxy`, that option should
-# be removed.
-#swarm_mode = False
diff --git a/config/plugins/interactive_environments/phinch/config/phinch.ini.sample b/config/plugins/interactive_environments/phinch/config/phinch.ini.sample
index c9dc8afba7c1..39451ec6d0db 100644
--- a/config/plugins/interactive_environments/phinch/config/phinch.ini.sample
+++ b/config/plugins/interactive_environments/phinch/config/phinch.ini.sample
@@ -24,11 +24,3 @@ image = shiltemann/docker-phinch-galaxy:16.04
# which ports it needs to connect. With this option you can specify the port number
# inside your container to which Galaxy should connect the UI.
#docker_connect_port = None
-
-# To run containers in Docker Swarm mode on (an existing swarm), set the
-# following option to True *and*:
-# - set docker_connect_port above. For shiltemann/docker-phinch-galaxy the port
-# should most likely be 80.
-# - If command_inject is uncommented and includes `--sig-proxy`, that option should
-# be removed.
-#swarm_mode = False
diff --git a/config/plugins/interactive_environments/rstudio/config/rstudio.ini.sample b/config/plugins/interactive_environments/rstudio/config/rstudio.ini.sample
index 6eff4b726dea..cf8d69e09cae 100644
--- a/config/plugins/interactive_environments/rstudio/config/rstudio.ini.sample
+++ b/config/plugins/interactive_environments/rstudio/config/rstudio.ini.sample
@@ -50,11 +50,3 @@ docker_connect_port = 80
# Set the following value to false if Docker volumes between Galaxy server and Docker
# container cannot or should not be used.
#use_volumes = True
-
-# To run containers in Docker Swarm mode on (an existing swarm), set the
-# following option to True *and*:
-# - set docker_connect_port above. For erasche/docker-rstudio-notebook the port
-# should most likely be 80.
-# - If command_inject is uncommented and includes `--sig-proxy`, that option should
-# be removed.
-#swarm_mode = False
diff --git a/config/swarm_manager_conf.yml.sample b/config/swarm_manager_conf.yml.sample
deleted file mode 120000
index 9118a9aa59f8..000000000000
--- a/config/swarm_manager_conf.yml.sample
+++ /dev/null
@@ -1 +0,0 @@
-../lib/galaxy/config/sample/swarm_manager_conf.yml.sample
\ No newline at end of file
diff --git a/display_applications/biom/biom_simple.xml b/display_applications/biom/biom_simple.xml
index 9cf1b5256bfd..787a4b3ef4dc 100644
--- a/display_applications/biom/biom_simple.xml
+++ b/display_applications/biom/biom_simple.xml
@@ -1,7 +1,7 @@
-
-
-
- ${ url % { 'biom_file_url_qp': $biom_file.qp } }
-
-
-
+
+
+
+ ${ url % { 'biom_file_url_qp': $biom_file.qp } }
+
+
+
diff --git a/display_applications/icn3d/icn3d_simple.xml b/display_applications/icn3d/icn3d_simple.xml
index 7623130a7595..6e76c8987be6 100644
--- a/display_applications/icn3d/icn3d_simple.xml
+++ b/display_applications/icn3d/icn3d_simple.xml
@@ -1,7 +1,7 @@
-
-
-
- ${ url % { 'icn3d_file_type': $icn3d_file.ext, 'icn3d_file_url_qp': $icn3d_file.qp } }
-
-
-
+
+
+
+ ${ url % { 'icn3d_file_type': $icn3d_file.ext, 'icn3d_file_url_qp': $icn3d_file.qp } }
+
+
+
diff --git a/display_applications/igv/bam.xml b/display_applications/igv/bam.xml
index 3dcb92359f77..82a514aae841 100644
--- a/display_applications/igv/bam.xml
+++ b/display_applications/igv/bam.xml
@@ -1,94 +1,94 @@
-
-
-
-
-
-
-
-
-
-
-
- ${$site_id.startswith( 'local_' ) or $dataset.dbkey in $site_dbkeys}
-
- ${redirect_url}
-
-
-
- #if ($dataset.dbkey in $site_dbkeys)
- $site_organisms[ $site_dbkeys.index( $bam_file.dbkey ) ]
- #else:
- $bam_file.dbkey
- #end if
-
-
-
-
- IGV 1.5
- The Broad Institute
-
- IGV Software
- IGV
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -g
- ${site_organism}
- ${bam_file.url}
-
-
-]]>
-
-
- #if $site_id.startswith( 'local_' )
- ${site_link}?file=${bam_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bam_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
- #elif $site_id.startswith( 'web_link_' ):
- ${site_link}?sessionURL=${bam_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bam_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
- #else:
- ${jnlp.url}
- #end if
-
-
-
-
- ${ $dataset.dbkey == $value }
-
- http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=${bam_file.qp}&genome=${bam_file.dbkey}&merge=true&name=${qp( ( $bam_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+ ${$site_id.startswith( 'local_' ) or $dataset.dbkey in $site_dbkeys}
+
+ ${redirect_url}
+
+
+
+ #if ($dataset.dbkey in $site_dbkeys)
+ $site_organisms[ $site_dbkeys.index( $bam_file.dbkey ) ]
+ #else:
+ $bam_file.dbkey
+ #end if
+
+
+
+
+ IGV 1.5
+ The Broad Institute
+
+ IGV Software
+ IGV
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -g
+ ${site_organism}
+ ${bam_file.url}
+
+
+]]>
+
+
+ #if $site_id.startswith( 'local_' )
+ ${site_link}?file=${bam_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bam_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+ #elif $site_id.startswith( 'web_link_' ):
+ ${site_link}?sessionURL=${bam_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bam_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+ #else:
+ ${jnlp.url}
+ #end if
+
+
+
+
+ ${ $dataset.dbkey == $value }
+
+ http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=${bam_file.qp}&genome=${bam_file.dbkey}&merge=true&name=${qp( ( $bam_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+
+
+
+
+
diff --git a/display_applications/igv/gff.xml b/display_applications/igv/gff.xml
index 4eabd5e578ef..78b6e6a357fa 100644
--- a/display_applications/igv/gff.xml
+++ b/display_applications/igv/gff.xml
@@ -1,92 +1,92 @@
-
-
-
-
-
-
-
-
-
-
-
- ${$site_id.startswith( 'local_' ) or $dataset.dbkey in $site_dbkeys}
-
- ${redirect_url}
-
-
- #if ($dataset.dbkey in $site_dbkeys)
- $site_organisms[ $site_dbkeys.index( $gff_file.dbkey ) ]
- #else:
- $gff_file.dbkey
- #end if
-
-
-
-
- IGV 1.5
- The Broad Institute
-
- IGV Software
- IGV
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -g
- ${site_organism}
- ${gff_file.url}
-
-
-]]>
-
-
- #if $site_id.startswith( 'local_' )
- ${site_link}?file=${gff_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $gff_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
- #elif $site_id.startswith( 'web_link_' ):
- ${site_link}?sessionURL=${gff_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $gff_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
- #else:
- ${jnlp.url}
- #end if
-
-
-
-
- ${ $dataset.dbkey == $value }
-
- http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=${gff_file.qp}&genome=${gff_file.dbkey}&merge=true&name=${qp( ( $gff_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+ ${$site_id.startswith( 'local_' ) or $dataset.dbkey in $site_dbkeys}
+
+ ${redirect_url}
+
+
+ #if ($dataset.dbkey in $site_dbkeys)
+ $site_organisms[ $site_dbkeys.index( $gff_file.dbkey ) ]
+ #else:
+ $gff_file.dbkey
+ #end if
+
+
+
+
+ IGV 1.5
+ The Broad Institute
+
+ IGV Software
+ IGV
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -g
+ ${site_organism}
+ ${gff_file.url}
+
+
+]]>
+
+
+ #if $site_id.startswith( 'local_' )
+ ${site_link}?file=${gff_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $gff_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+ #elif $site_id.startswith( 'web_link_' ):
+ ${site_link}?sessionURL=${gff_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $gff_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+ #else:
+ ${jnlp.url}
+ #end if
+
+
+
+
+ ${ $dataset.dbkey == $value }
+
+ http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=${gff_file.qp}&genome=${gff_file.dbkey}&merge=true&name=${qp( ( $gff_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+
+
+
+
diff --git a/display_applications/igv/interval_as_bed.xml b/display_applications/igv/interval_as_bed.xml
index 4fd6a98095fb..aee3acc5bdf5 100644
--- a/display_applications/igv/interval_as_bed.xml
+++ b/display_applications/igv/interval_as_bed.xml
@@ -1,92 +1,92 @@
-
-
-
-
-
-
-
-
-
-
-
- ${$site_id.startswith( 'local_' ) or $dataset.dbkey in $site_dbkeys}
-
- ${redirect_url}
-
-
- #if ($dataset.dbkey in $site_dbkeys)
- $site_organisms[ $site_dbkeys.index( $bed_file.dbkey ) ]
- #else:
- $bed_file.dbkey
- #end if
-
-
-
-
- IGV 1.5
- The Broad Institute
-
- IGV Software
- IGV
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -g
- ${site_organism}
- ${bed_file.url}
-
-
-]]>
-
-
- #if $site_id.startswith( 'local_' )
- ${site_link}?file=${bed_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bed_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
- #elif $site_id.startswith( 'web_link_' ):
- ${site_link}?sessionURL=${bed_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bed_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
- #else:
- ${jnlp.url}
- #end if
-
-
-
-
- ${ $dataset.dbkey == $value }
-
- http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=${bed_file.qp}&genome=${bed_file.dbkey}&merge=true&name=${qp( ( $bed_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+ ${$site_id.startswith( 'local_' ) or $dataset.dbkey in $site_dbkeys}
+
+ ${redirect_url}
+
+
+ #if ($dataset.dbkey in $site_dbkeys)
+ $site_organisms[ $site_dbkeys.index( $bed_file.dbkey ) ]
+ #else:
+ $bed_file.dbkey
+ #end if
+
+
+
+
+ IGV 1.5
+ The Broad Institute
+
+ IGV Software
+ IGV
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -g
+ ${site_organism}
+ ${bed_file.url}
+
+
+]]>
+
+
+ #if $site_id.startswith( 'local_' )
+ ${site_link}?file=${bed_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bed_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+ #elif $site_id.startswith( 'web_link_' ):
+ ${site_link}?sessionURL=${bed_file.qp}&genome=${site_organism}&merge=true&name=${qp( ( $bed_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+ #else:
+ ${jnlp.url}
+ #end if
+
+
+
+
+ ${ $dataset.dbkey == $value }
+
+ http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=${bed_file.qp}&genome=${bed_file.dbkey}&merge=true&name=${qp( ( $bed_file.name or $DATASET_HASH ).replace( ',', ';' ) )}
+
+
+
+
diff --git a/display_applications/intermine/intermine_simple.xml b/display_applications/intermine/intermine_simple.xml
index 4b566ca3fd68..3bb2d58ce5ee 100644
--- a/display_applications/intermine/intermine_simple.xml
+++ b/display_applications/intermine/intermine_simple.xml
@@ -1,7 +1,7 @@
-
-
-
- ${ url % { 'intermine_file_url_qp': $intermine_file.qp } }
-
-
-
+
+
+
+ ${ url % { 'intermine_file_url_qp': $intermine_file.qp } }
+
+
+
diff --git a/display_applications/iobio/bam.xml b/display_applications/iobio/bam.xml
index 69cdabf6b427..931baedc1294 100644
--- a/display_applications/iobio/bam.xml
+++ b/display_applications/iobio/bam.xml
@@ -1,8 +1,8 @@
-
-
-
- ${url}?bam=${bam_file.qp}
-
-
-
-
+
+
+
+ ${url}?bam=${bam_file.qp}
+
+
+
+
diff --git a/doc/source/admin/galaxy_options.rst b/doc/source/admin/galaxy_options.rst
index 29a9dab6194d..d6c9da0d4ea8 100644
--- a/doc/source/admin/galaxy_options.rst
+++ b/doc/source/admin/galaxy_options.rst
@@ -6,7 +6,7 @@
The directory that will be prepended to relative paths in options
specifying other Galaxy config files (e.g. datatypes_config_file).
Defaults to the directory in which galaxy.yml is located.
-:Default: ``false``
+:Default: ``None``
:Type: str
@@ -19,7 +19,7 @@
specifying Galaxy data/cache directories and files (such as the
default SQLite database, file_path, etc.). Defaults to `database/`
if running Galaxy from source or `/data` otherwise.
-:Default: ``false``
+:Default: ``None``
:Type: str
@@ -33,7 +33,9 @@
string to specify an external database instead. This string takes
many options which are explained in detail in the config file
documentation.
-:Default: ``sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE``
+ Sample default
+ 'sqlite:///<data_dir>/universe.sqlite?isolation_level=IMMEDIATE'
+:Default: ``None``
:Type: str
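
For illustration, a minimal `galaxy.yml` sketch pointing this option at an external PostgreSQL database, reusing the socket-style URL shown in the production documentation further down (the database name is a placeholder):

```yaml
galaxy:
  # Use an external PostgreSQL database instead of the SQLite default
  database_connection: postgresql:///mydatabase?host=/var/run/postgresql
```
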
@@ -108,7 +110,7 @@
on an existing template database. This will set that. This is
probably only useful for testing but documentation is included
here for completeness.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -121,8 +123,8 @@
below will be logged to debug. A value of '0' is disabled. For
example, you would set this to .005 to log all queries taking
longer than 5 milliseconds.
-:Default: ``0``
-:Type: int
+:Default: ``0.0``
+:Type: float
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -151,7 +153,8 @@
can be used to separate the tool shed install database (all other
options listed above but prefixed with install_ are also
available).
-:Default: ``sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE``
+ Defaults to the value of the 'database_connection' option.
+:Default: ``None``
:Type: str
@@ -204,10 +207,12 @@
~~~~~~~~~~~~~
:Description:
- Where dataset files are stored. It must accessible at the same
+ Where dataset files are stored. It must be accessible at the same
path on any cluster nodes that will run Galaxy jobs, unless using
Pulsar.
-:Default: ``database/files``
+ Default value will be resolved to 'database/files' where
+ 'database' is the default value of the 'data_dir' option.
+:Default: ``files``
:Type: str
@@ -216,10 +221,12 @@
~~~~~~~~~~~~~~~~~
:Description:
- Where temporary files are stored. It must accessible at the same
- path on any cluster nodes that will run Galaxy jobs, unless using
- Pulsar.
-:Default: ``database/tmp``
+ Where temporary files are stored. It must be accessible at the
+ same path on any cluster nodes that will run Galaxy jobs, unless
+ using Pulsar.
+ Default value will be resolved to 'database/tmp' where 'database'
+ is the default value of the 'data_dir' option.
+:Default: ``tmp``
:Type: str
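
To make the relative defaults above concrete, a sketch with everything left at its defaults, written out explicitly (the resolution happens automatically; this is only illustrative):

```yaml
galaxy:
  data_dir: database
  file_path: files      # resolves to database/files
  new_file_path: tmp    # resolves to database/tmp
```
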
@@ -231,8 +238,28 @@
Tool config files, defines what tools are available in Galaxy.
Tools can be locally developed or installed from Galaxy tool
sheds. (config/tool_conf.xml.sample will be used if left unset and
- config/tool_conf.xml does not exist).
-:Default: ``config/tool_conf.xml,config/shed_tool_conf.xml``
+ config/tool_conf.xml does not exist). Can be a single file, a list
+ of files, or (for backwards compatibility) a comma-separated list
+ of files.
+:Default: ``config/tool_conf.xml``
+:Type: any
+
+
+~~~~~~~~~~~~~~~~~~~~~~~~~
+``shed_tool_config_file``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:Description:
+ Tool config file for tools installed from the Galaxy Tool Shed.
+ Must be writable by Galaxy and generally should not be edited by
+ hand. In older Galaxy releases, this file was part of the
+ tool_config_file option. It is still possible to specify this file
+ (and other shed-enabled tool config files) in tool_config_file,
+ but in the standard case of a single shed-enabled tool config,
+ this option is preferable. This file will be created automatically
+ upon tool installation, whereas Galaxy will fail to start if any
+ files in tool_config_file cannot be read.
+:Default: ``config/shed_tool_conf.xml``
:Type: str
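
A sketch of the split described above: hand-maintained tool panels listed in `tool_config_file`, with the writable shed-managed config kept separate (paths are the defaults quoted in the descriptions):

```yaml
galaxy:
  tool_config_file:
    - config/tool_conf.xml
  # Created and maintained by Galaxy when tools are installed from the Tool Shed
  shed_tool_config_file: config/shed_tool_conf.xml
```
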
@@ -261,7 +288,9 @@
migration scripts to install tools that have been migrated to the
tool shed upon a new release, they will be added to this tool
config file.
-:Default: ``config/migrated_tools_conf.xml``
+ Default value will be resolved to 'config/migrated_tools_conf.xml'
+ where 'config' is the default value of the 'config_dir' option.
+:Default: ``migrated_tools_conf.xml``
:Type: str
@@ -299,11 +328,14 @@
Various dependency resolver configuration parameters will have
defaults set relative to this path, such as the default conda
prefix, default Galaxy packages path, legacy tool shed
- dependencies path, and the dependency cache directory. Set the
- string to None to explicitly disable tool dependency handling. If
- this option is set to none or an invalid path, installing tools
- with dependencies from the Tool Shed or in Conda will fail.
-:Default: ``database/dependencies``
+ dependencies path, and the dependency cache directory.
+ Set the string to null to explicitly disable tool dependency
+ handling. If this option is set to none or an invalid path,
+ installing tools with dependencies from the Tool Shed or in Conda
+ will fail.
+ Default value will be resolved to 'database/dependencies' where
+ 'database' is the default value of the 'data_dir' option.
+:Default: ``dependencies``
:Type: str
@@ -316,9 +348,9 @@
options for how Galaxy resolves tool dependencies (requirement
tags in Tool XML). The default ordering is to the use the Tool
Shed for tools installed that way, use local Galaxy packages, and
- then use Conda if available. See https://github.com/galaxyproject/
- galaxy/blob/dev/doc/source/admin/dependency_resolvers.rst for more
- information on these options.
+ then use Conda if available. See
+ https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/dependency_resolvers.rst
+ for more information on these options.
:Default: ``config/dependency_resolvers_conf.xml``
:Type: str
@@ -329,11 +361,9 @@
:Description:
conda_prefix is the location on the filesystem where Conda
- packages and environments are installed IMPORTANT: Due to a
- current limitation in conda, the total length of the conda_prefix
- and the job_working_directory path should be less than 50
- characters!
-:Default: ``<tool_dependency_dir>/_conda``
+ packages and environments are installed.
+ Sample default '<tool_dependency_dir>/_conda'
+:Default: ``None``
:Type: str
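
An illustrative sketch of how these paths relate (the explicit `conda_prefix` value is hypothetical; leaving it unset resolves it under the dependency directory as described above):

```yaml
galaxy:
  # Root for resolved tool dependencies; default resolves to database/dependencies
  tool_dependency_dir: database/dependencies
  # Optional override; otherwise <tool_dependency_dir>/_conda is used
  #conda_prefix: /opt/galaxy/_conda
```
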
@@ -429,9 +459,10 @@
share. Set this option to true to cache the dependencies in a
folder. This option is beta and should only be used if you
experience long waiting times before a job is actually submitted
- to your cluster. This only affects tools where some requirements
- can be resolved but not others, most modern best practice tools
- can use prebuilt environments in the Conda directory.
+ to your cluster.
+ This only affects tools where some requirements can be resolved
+ but not others; most modern best-practice tools can use prebuilt
+ environments in the Conda directory.
:Default: ``false``
:Type: bool
@@ -442,8 +473,9 @@
:Description:
By default the tool_dependency_cache_dir is the _cache directory
- of the tool dependency directory
-:Default: ``<tool_dependency_dir>/_cache``
+ of the tool dependency directory.
+ Sample default '<tool_dependency_dir>/_cache'
+:Default: ``None``
:Type: str
@@ -560,7 +592,7 @@
container resolvers to use when discovering containers for Galaxy.
If this is set to None, the default containers loaded is
determined by enable_mulled_containers.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -575,7 +607,8 @@
the Galaxy host. This is ignored if the relevant container
resolver isn't enabled, and will install on demand unless
involucro_auto_init is set to false.
-:Default: ``database/dependencies/involucro``
+ Sample default '<tool_dependency_dir>/involucro'
+:Default: ``None``
:Type: str
@@ -689,8 +722,9 @@
:Description:
Directory where Tool Data Table related files will be placed when
- installed from a ToolShed. Defaults to tool_data_path.
-:Default: ``tool-data``
+ installed from a ToolShed. Defaults to the value of the
+ 'tool_data_path' option.
+:Default: ``None``
:Type: str
@@ -717,8 +751,9 @@
~~~~~~~~~~~~~~~~~~~~
:Description:
- File containing old-style genome builds
-:Default: ``tool-data/shared/ucsc/builds.txt``
+ File containing old-style genome builds. Value will be resolved
+ with respect to <tool_data_path>.
+:Default: ``shared/ucsc/builds.txt``
:Type: str
@@ -728,8 +763,9 @@
:Description:
Directory where chrom len files are kept, currently mainly used by
- trackster
-:Default: ``tool-data/shared/ucsc/chrom``
+ trackster. Value will be resolved with respect to
+ <tool_data_path>.
+:Default: ``shared/ucsc/chrom``
:Type: str
@@ -778,7 +814,7 @@
Visualizations config directory: where to look for individual
visualization plugins. The path is relative to the Galaxy root
dir. To use an absolute path begin the path with '/'. This is a
- comma separated list. Defaults to "config/plugins/visualizations".
+ comma-separated list.
:Default: ``config/plugins/visualizations``
:Type: str
@@ -794,41 +830,11 @@
stock plugins. These will require Docker to be configured and have
security considerations, so proceed with caution. The path is
relative to the Galaxy root dir. To use an absolute path begin
- the path with '/'. This is a comma separated list.
+ the path with '/'. This is a comma-separated list.
:Default: ``None``
:Type: str
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``interactive_environment_swarm_mode``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-:Description:
- To run interactive environment containers in Docker Swarm mode (on
- an existing swarm), set this option to true and set
- `docker_connect_port` in the IE plugin config (ini) file(s) of any
- IE plugins you have enabled and ensure that you are not using any
- `docker run`-specific options in your plugins' `command_inject`
- options (swarm mode services run using `docker service create`,
- which has a different and more limited set of options). This
- option can be overridden on a per-plugin basis by using the
- `swarm_mode` option in the plugin's ini config file.
-:Default: ``false``
-:Type: bool
-
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``swarm_manager_config_file``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-:Description:
- Galaxy can run a "swarm manager" service that will monitor
- utilization of the swarm and provision/deprovision worker nodes as
- necessary. The service has its own configuration file.
-:Default: ``config/swarm_manager_conf.yml``
-:Type: str
-
-
~~~~~~~~~~~~~~~~~~~
``tour_config_dir``
~~~~~~~~~~~~~~~~~~~
@@ -867,7 +873,9 @@
Each job is given a unique empty directory as its current working
directory. This option defines in what parent directory those
directories will be created.
-:Default: ``database/jobs_directory``
+ Default value will be resolved to 'database/jobs_directory' where
+ 'database' is the default value of the 'data_dir' option.
+:Default: ``jobs_directory``
:Type: str
@@ -878,7 +886,8 @@
:Description:
If using a cluster, Galaxy will write job scripts and
stdout/stderr to this directory.
-:Default: ``database/pbs``
+ Value will be resolved with respect to <data_dir>.
+:Default: ``pbs``
:Type: str
@@ -889,7 +898,9 @@
:Description:
Mako templates are compiled as needed and cached for reuse, this
directory is used for the cache
-:Default: ``database/compiled_templates``
+ Default value will be resolved to 'database/compiled_templates'
+ where 'database' is the default value of the 'data_dir' option.
+:Default: ``compiled_templates``
:Type: str
@@ -964,7 +975,9 @@
fetched from external sources such as https://doi.org/ by Galaxy -
the following parameters can be used to control the caching used
to store this information.
-:Default: ``database/citations/data``
+ Default value will be resolved to 'database/citations/data' where
+ 'database' is the default value of the 'data_dir' option.
+:Default: ``citations/data``
:Type: str
@@ -977,7 +990,9 @@
fetched from external sources such as https://doi.org/ by Galaxy -
the following parameters can be used to control the caching used
to store this information.
-:Default: ``database/citations/lock``
+ Default value will be resolved to 'database/citations/locks' where
+ 'database' is the default value of the 'data_dir' option.
+:Default: ``citations/locks``
:Type: str
@@ -1015,7 +1030,7 @@
needs to send mail through an SMTP server, which you may define
here (host:port). Galaxy will automatically try STARTTLS but will
continue upon failure.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1027,7 +1042,7 @@
If your SMTP server requires a username and password, you can
provide them here (password in cleartext here, but if your server
supports STARTTLS it will be sent over the network encrypted).
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1039,7 +1054,7 @@
If your SMTP server requires a username and password, you can
provide them here (password in cleartext here, but if your server
supports STARTTLS it will be sent over the network encrypted).
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1063,7 +1078,8 @@
list. This is the address used to subscribe to the list. Uncomment
and leave empty if you want to remove this option from the user
registration form.
-:Default: ``galaxy-announce-join@bx.psu.edu``
+ Example value 'galaxy-announce-join@bx.psu.edu'
+:Default: ``None``
:Type: str
@@ -1077,7 +1093,7 @@
disabled if no address is set. Also this email is shown as a
contact to user in case of Galaxy misconfiguration and other
events user may encounter.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1091,7 +1107,7 @@
resets. We recommend using string in the following format: Galaxy
Project If not configured, '' will be used.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1102,7 +1118,8 @@
:Description:
URL of the support resource for the galaxy instance. Used in
activation emails.
-:Default: ``https://galaxyproject.org/``
+ Example value 'https://galaxyproject.org/'
+:Default: ``None``
:Type: str
@@ -1115,7 +1132,8 @@
using disposable email address during the registration. If their
address domain matches any domain in the blacklist, they are
refused the registration.
-:Default: ``config/disposable_email_blacklist.conf``
+ Example value 'config/disposable_email_blacklist.conf'
+:Default: ``None``
:Type: str
@@ -1202,7 +1220,7 @@
:Description:
You can enter tracking code here to track visitor's behavior
through your Google Analytics account. Example: UA-XXXXXXXX-Y
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1214,18 +1232,19 @@
Galaxy can display data at various external browsers. These
options specify which browsers should be available. URLs and
builds available at these browsers are defined in the specified
- files. If use_remote_user is set to true, display application
- servers will be denied access to Galaxy and so displaying datasets
- in these sites will fail. display_servers contains a list of
+ files.
+ If use_remote_user is set to true, display application servers
+ will be denied access to Galaxy and so displaying datasets in
+ these sites will fail. display_servers contains a list of
hostnames which should be allowed to bypass security to display
datasets. Please be aware that there are security implications if
this is allowed. More details (including required changes to the
proxy server config) are available in the Apache proxy
- documentation on the Galaxy Community Hub. The list of servers in
- this sample config are for the UCSC Main, Test and Archaea
- browsers, but the default if left commented is to not allow any
- display sites to bypass security (you must uncomment the line
- below to allow them).
+ documentation on the Galaxy Community Hub.
+ The list of servers in this sample config is for the UCSC Main,
+ Test and Archaea browsers, but the default if left commented is to
+ not allow any display sites to bypass security (you must uncomment
+ the line below to allow them).
:Default: ``hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw5.cse.ucsc.edu,hgw6.cse.ucsc.edu,hgw7.cse.ucsc.edu,hgw8.cse.ucsc.edu,lowepub.cse.ucsc.edu``
:Type: str
@@ -1284,7 +1303,7 @@
:Description:
Show a message box under the masthead.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1305,7 +1324,7 @@
:Description:
Append "/{brand}" to the "Galaxy" text in the masthead.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1359,11 +1378,11 @@
URL (with schema http/https) of the Galaxy instance as accessible
within your local network - if specified used as a default by
pulsar file staging and Jupyter Docker container for communicating
- back with Galaxy via the API. If you are attempting to set up
- GIEs on Mac OS X with Docker Desktop for Mac and your Galaxy
- instance runs on port 8080 this should be
- 'http://host.docker.internal:8080'. For more details see
- https://docs.docker.com/docker-for-mac/networking/
+ back with Galaxy via the API.
+ If you are attempting to set up GIEs on Mac OS X with Docker
+ Desktop for Mac and your Galaxy instance runs on port 8080 this
+ should be 'http://host.docker.internal:8080'. For more details
+ see https://docs.docker.com/docker-for-mac/networking/
:Default: ``http://localhost:8080``
:Type: str
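
For the Docker Desktop for Mac case called out above, the corresponding one-line sketch:

```yaml
galaxy:
  galaxy_infrastructure_url: http://host.docker.internal:8080
```
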
@@ -1412,7 +1431,7 @@
:Description:
The URL linked by the "Galaxy Help" link in the "Help" menu.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1496,7 +1515,7 @@
The URL linked by the "Terms and Conditions" link in the "Help"
menu, as well as on the user registration and login forms and in
the activation emails.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1649,7 +1668,7 @@
Redirect. This should be set to the path defined in the nginx
config as an internal redirect with access to Galaxy's data files
(see documentation linked above).
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1692,7 +1711,7 @@
explained in detail in the documentation linked above. The upload
store is a temporary directory in which files uploaded by the
upload module will be placed.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1704,7 +1723,7 @@
This value overrides the action set on the file upload form, e.g.
the web path where the nginx_upload_module has been configured to
intercept upload requests.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1717,7 +1736,7 @@
out upon job completion by remote job runners (i.e. Pulsar) that
initiate staging operations on the remote end. See the Galaxy
nginx documentation for the corresponding nginx configuration.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1730,7 +1749,7 @@
out upon job completion by remote job runners (i.e. Pulsar) that
initiate staging operations on the remote end. See the Galaxy
nginx documentation for the corresponding nginx configuration.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1781,7 +1800,9 @@
:Description:
The NodeJS dynamic proxy can use an SQLite database or a JSON file
for IPC, set that here.
-:Default: ``database/session_map.sqlite``
+ Default value will be resolved to 'database/session_map.sqlite'
+ where 'database' is the default value of the 'data_dir' option.
+:Default: ``session_map.sqlite``
:Type: str
@@ -1888,7 +1909,7 @@
Galaxy instead of a JSON or SQLite file for IPC. If you do not
specify this, it will be set randomly for you. You should set this
if you are managing the proxy manually.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -1959,7 +1980,7 @@
:Description:
Turn on logging of application events and some user events to the
database.
-:Default: ``true``
+:Default: ``false``
:Type: bool
@@ -1972,7 +1993,7 @@
currently logged are grid views, tool searches, and use of
"recently" used tools menu. The log_events and log_actions
functionality will eventually be merged.
-:Default: ``true``
+:Default: ``false``
:Type: bool
@@ -2057,11 +2078,11 @@
Return a Access-Control-Allow-Origin response header that matches
the Origin header of the request if that Origin hostname matches
one of the strings or regular expressions listed here. This is a
- comma separated list of hostname strings or regular expressions
+ comma-separated list of hostname strings or regular expressions
beginning and ending with /. E.g.
mysite.com,google.com,usegalaxy.org,/^[\w\.]*example\.com/ See:
https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
-:Default: ````
+:Default: ``None``
:Type: str
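
Using the example list from the description, a sketch (regular expressions are delimited with `/`; the quoting only keeps YAML from mangling the backslashes):

```yaml
galaxy:
  allowed_origin_hostnames: 'mysite.com,google.com,usegalaxy.org,/^[\w\.]*example\.com/'
```
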
@@ -2185,7 +2206,8 @@
:Description:
Heartbeat log filename. Can accept the template variables
{server_name} and {pid}
-:Default: ``heartbeat_{server_name}.log``
+ Sample default 'heartbeat_{server_name}.log'
+:Default: ``None``
:Type: str
@@ -2199,7 +2221,7 @@
middleware and errors will be sent to the indicated sentry
instance. This connection string is available in your sentry
instance under <project_name> -> Settings -> API Keys.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2282,7 +2304,7 @@
:Description:
Add an option to the library upload form which allows
administrators to upload a directory of files.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2297,7 +2319,7 @@
admin user's Galaxy login ( email ). The non-admin user is
restricted to uploading files or sub-directories of files
contained in their directory.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2325,7 +2347,7 @@
*any* user with library import permissions can import from
anywhere in these directories (assuming they are able to create
symlinks to them).
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2550,8 +2572,8 @@
User authentication can be delegated to an upstream proxy server
(usually Apache). The upstream proxy should set a REMOTE_USER
header in the request. Enabling remote user disables regular
- logins. For more information, see: https://docs.galaxyproject.org
- /en/master/admin/special_topics/apache.html
+ logins. For more information, see:
+ https://docs.galaxyproject.org/en/master/admin/special_topics/apache.html
:Default: ``false``
:Type: bool
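
A hedged sketch of the two options that usually go together when delegating authentication to a proxy (the mail domain is a placeholder; the proxy itself still has to be configured per the linked documentation):

```yaml
galaxy:
  # Trust the REMOTE_USER header set by the upstream proxy
  use_remote_user: true
  # Appended to bare usernames to form Galaxy's email-style identities
  remote_user_maildomain: example.org
```
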
@@ -2565,7 +2587,7 @@
method just returns bare usernames, set a default mail domain to
be appended to usernames, to become your Galaxy usernames (email
addresses).
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2607,7 +2629,7 @@
:Description:
If use_remote_user is enabled, you can set this to a URL that will
log your users out.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2647,7 +2669,7 @@
the Admin section of the server, and will have access to create
users, groups, roles, libraries, and more. For more information,
see: https://galaxyproject.org/admin/
-:Default: ````
+:Default: ``None``
:Type: str
@@ -2751,8 +2773,9 @@
have the correct user show up. This makes less sense on large
public Galaxy instances where that data shouldn't be exposed. For
semi-public Galaxies, it may make sense to expose just the
- username and not email, or vice versa. If enable_beta_gdpr is set
- to true, then this option will be overridden and set to false.
+ username and not email, or vice versa.
+ If enable_beta_gdpr is set to true, then this option will be
+ overridden and set to false.
:Default: ``false``
:Type: bool
@@ -2768,8 +2791,9 @@
have the correct user show up. This makes less sense on large
public Galaxy instances where that data shouldn't be exposed. For
semi-public Galaxies, it may make sense to expose just the
- username and not email, or vice versa. If enable_beta_gdpr is set
- to true, then this option will be overridden and set to false.
+ username and not email, or vice versa.
+ If enable_beta_gdpr is set to true, then this option will be
+ overridden and set to false.
:Default: ``false``
:Type: bool
@@ -2785,7 +2809,7 @@
the administrator did not intend to expose. Previously, you could
request any network service that Galaxy might have had access to,
even if the user could not normally access it. It should be a
- comma separated list of IP addresses or IP address/mask, e.g.
+ comma-separated list of IP addresses or IP address/mask, e.g.
10.10.10.10,10.0.1.0/24,fd00::/8
:Default: ``None``
:Type: str
@@ -2801,12 +2825,13 @@
emails and usernames from logs and bug reports. It also causes the
delete user admin action to permanently redact their username and
password, but not to delete data associated with the account as
- this is not currently easily implementable. You are responsible
- for removing personal data from backups. This forces
- expose_user_email and expose_user_name to be false, and forces
- user_deletion to be true to support the right to erasure. Please
- read the GDPR section under the special topics area of the admin
- documentation.
+ this is not currently easily implementable.
+ You are responsible for removing personal data from backups.
+ This forces expose_user_email and expose_user_name to be false,
+ and forces user_deletion to be true to support the right to
+ erasure.
+ Please read the GDPR section under the special topics area of the
+ admin documentation.
:Default: ``false``
:Type: bool
@@ -2827,7 +2852,9 @@
:Description:
Enable beta workflow modules that should not yet be considered
- part of Galaxy's stable API.
+ part of Galaxy's stable API. (The module state definitions may
+ change and workflows built using these modules may not function
+ in the future.)
:Default: ``false``
:Type: bool
@@ -2989,7 +3016,7 @@
:Description:
Optional list of email addresses of API users who can make calls
on behalf of other users.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -3002,7 +3029,7 @@
actually having a defined admin user in the database/config. Only
set this if you need to bootstrap Galaxy, you probably do not want
to set this on public servers.
-:Default: ``changethis``
+:Default: ``None``
:Type: str
@@ -3022,7 +3049,9 @@
:Description:
If OpenID is enabled, consumer cache directory to use.
-:Default: ``database/openid_consumer_cache``
+ Default value will be resolved to 'database/openid_consumer_cache'
+ where 'database' is the default value of the 'data_dir' option.
+:Default: ``openid_consumer_cache``
:Type: str
@@ -3076,7 +3105,7 @@
following two options. This should point to a directory containing
subdirectories matching users' identifier (defaults to e-mail),
where Galaxy will look for files.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -3087,7 +3116,7 @@
:Description:
This should be the hostname of your FTP server, which will be
provided to users in the help text.
-:Default: ````
+:Default: ``None``
:Type: str
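
A hypothetical sketch combining the two FTP options just described (directory and hostname are placeholders):

```yaml
galaxy:
  # Contains one subdirectory per user, named by the user's identifier (e-mail by default)
  ftp_upload_dir: /srv/galaxy/ftp
  # Hostname shown to users in the upload help text
  ftp_upload_site: ftp.example.org
```
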
@@ -3222,8 +3251,8 @@
you can separate Galaxy into multiple processes. There are more
than one way to do this, and they are explained in detail in the
documentation:
- https://docs.galaxyproject.org/en/master/admin/scaling.html By
- default, Galaxy manages and executes jobs from within a single
+ https://docs.galaxyproject.org/en/master/admin/scaling.html
+ By default, Galaxy manages and executes jobs from within a single
process and notifies itself of new jobs via in-memory queues.
Jobs are run locally on the system on which Galaxy is started.
Advanced job running capabilities can be configured through the
@@ -3447,7 +3476,9 @@
(https://docs.galaxyproject.org/en/master/admin/cluster.html
#submitting-jobs-as-the-real-user) this script is used to run the
job script Galaxy generates for a tool execution.
-:Default: ``sudo -E scripts/drmaa_external_runner.py --assign_all_groups``
+ Example value 'sudo -E scripts/drmaa_external_runner.py
+ --assign_all_groups'
+:Default: ``None``
:Type: str
@@ -3460,7 +3491,8 @@
(https://docs.galaxyproject.org/en/master/admin/cluster.html
#submitting-jobs-as-the-real-user) this script is used to kill
such jobs by Galaxy (e.g. if the user cancels the job).
-:Default: ``sudo -E scripts/drmaa_external_killer.py``
+ Example value 'sudo -E scripts/drmaa_external_killer.py'
+:Default: ``None``
:Type: str
@@ -3474,7 +3506,8 @@
#submitting-jobs-as-the-real-user) this script is used transfer
permissions back and forth between the Galaxy user and the user
that is running the job.
-:Default: ``sudo -E scripts/external_chown_script.py``
+ Example value 'sudo -E scripts/external_chown_script.py'
+:Default: ``None``
:Type: str
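
Putting the three "run jobs as the real user" scripts together, a sketch that simply reuses the example values quoted above (option keys follow the corresponding `galaxy.yml` settings):

```yaml
galaxy:
  drmaa_external_runjob_script: sudo -E scripts/drmaa_external_runner.py --assign_all_groups
  drmaa_external_killjob_script: sudo -E scripts/drmaa_external_killer.py
  external_chown_script: sudo -E scripts/external_chown_script.py
```
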
@@ -3513,7 +3546,7 @@
environment prior to running tools. This can be especially useful
for running jobs as the actual user, to remove the need to
configure each user's environment individually.
-:Default: ````
+:Default: ``None``
:Type: str
@@ -3671,7 +3704,7 @@
The base module(s) that are searched for modules for toolbox
filtering (https://galaxyproject.org/user-defined-toolbox-
filters/) functions.
-:Default: ``galaxy.tools.toolbox.filters,galaxy.tools.filters``
+:Default: ``galaxy.tools.filters,galaxy.tools.toolbox.filters``
:Type: str
@@ -3684,12 +3717,13 @@
For example, when reloading the toolbox or locking job execution,
the process that handled that particular request will tell all
others to also reload, lock jobs, etc. For connection examples,
- see http://docs.celeryproject.org/projects/kombu/en/latest/usergui
- de/connections.html Without specifying anything here, galaxy will
- first attempt to use your specified database_connection above. If
- that's not specified either, Galaxy will automatically create and
- use a separate sqlite database located in your /database
- folder (indicated in the commented out line below).
+ see
+ http://docs.celeryproject.org/projects/kombu/en/latest/userguide/connections.html
+ Without specifying anything here, galaxy will first attempt to use
+ your specified database_connection above. If that's not specified
+ either, Galaxy will automatically create and use a separate sqlite
+ database located in your <galaxy>/database folder (indicated in
+ the commented out line below).
:Default: ``sqlalchemy+sqlite:///./database/control.sqlite?isolation_level=IMMEDIATE``
:Type: str
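
A hedged sketch pointing the internal message queue at an external AMQP broker instead of the SQLite fallback (host and credentials are placeholders; any kombu-supported connection URL should work, per the guide linked above):

```yaml
galaxy:
  amqp_internal_connection: amqp://galaxy:secret@localhost:5672//
```
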
diff --git a/doc/source/admin/production.md b/doc/source/admin/production.md
index c7ea2f4b5b2f..afa14d225846 100644
--- a/doc/source/admin/production.md
+++ b/doc/source/admin/production.md
@@ -75,11 +75,11 @@ postgresql:///mydatabase?host=/var/run/postgresql
For more hints on available options for the database URL, see the [SQLAlchemy documentation](https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls).
-#### Deprecated MySQL support
+#### MySQL database server (unsupported)
-Postgres database backend is more used and better tested, please use it if you can.
+The Postgres database backend is far more widely used and better tested; please use it if you can.
-In the past Galaxy supported [MySQL](https://dev.mysql.com/) but currently there are nontrivial problems associated with it so expect amount of troubleshooting when using it.
+In the past Galaxy supported [MySQL](https://dev.mysql.com/), but currently there may be nontrivial problems associated with it, so expect some amount of troubleshooting when using it.
Connection string example:
```
diff --git a/doc/source/admin/reports_options.rst b/doc/source/admin/reports_options.rst
index ce307a2cab9a..61bd155a3db7 100644
--- a/doc/source/admin/reports_options.rst
+++ b/doc/source/admin/reports_options.rst
@@ -132,9 +132,10 @@
:Description:
Enables GDPR Compliance mode. This makes several changes to the
way Galaxy logs and exposes data externally such as removing
- emails/usernames from logs and bug reports. You are responsible
- for removing personal data from backups. Please read the GDPR
- section under the special topics area of the admin documentation.
+ emails/usernames from logs and bug reports.
+ You are responsible for removing personal data from backups.
+ Please read the GDPR section under the special topics area of the
+ admin documentation.
:Default: ``false``
:Type: bool
diff --git a/doc/source/admin/special_topics/interactive_environments.rst b/doc/source/admin/special_topics/interactive_environments.rst
index f7e7fce324c1..c8d87b9d6153 100644
--- a/doc/source/admin/special_topics/interactive_environments.rst
+++ b/doc/source/admin/special_topics/interactive_environments.rst
@@ -281,7 +281,7 @@ system. Legacy Docker Swarm is supported without any special configuration,
because the containers are still run with ``docker run`` as before. To support
Docker Engine swarm mode, additional configuration is required. Begin by
editing your GIE plugin's ini configuration file (e.g. ``jupyter.ini``) and set
-the ``docker_connect_port`` and ``swarm_mode options`` in addition to any other
+the ``docker_connect_port`` option in addition to any other
relevant options. Unless you are using a non-standard Docker image, the correct
value for ``docker_connect_port`` should be suggested to you in the sample
configuration file:
@@ -290,13 +290,6 @@ configuration file:
[docker]
docker_connect_port = 8888
- swarm_mode = True
-
-You can also enable swarm mode for *all* GIE plugins by setting
-``interactive_environment_swarm_mode`` in ``galaxy.yml`` to ``True``. If using
-this setting, you must still set ``docker_connect_port`` in each GIE plugin's
-ini configuration file. The ``swarm_mode`` setting in individual GIE plugin
-config files will override the value set in ``galaxy.yml``.
Note that your Galaxy server does not need to be a member of the swarm itself.
It can use the method outlined above in the `Docker on Another Host`_ section
@@ -305,11 +298,3 @@ to connect as a client to a Docker daemon acting as a swarm mode manager.
Once configured, you should see that your GIE containers are started and run as
services, which you can inspect using the ``docker service ls`` command and
other ``docker service`` subcommands.
-
-**Galaxy swarm manager**
-
-Galaxy will start a "swarm manager" process when the first swarm mode GIE is
-launched. You can control this daemon with the config file
-``config/swarm_mode_manager.yml``. Consult the sample configuration at
-``config/swarm_mode_manager.yml.sample`` for syntax. It will automatically shut
-down when no services or nodes remain to be managed.
diff --git a/doc/source/dev/interactive_environments.rst b/doc/source/dev/interactive_environments.rst
index 0f173bec0cd6..27b7925032bc 100644
--- a/doc/source/dev/interactive_environments.rst
+++ b/doc/source/dev/interactive_environments.rst
@@ -123,14 +123,6 @@ Once this is done, we can set up our INI file, ``config/helloworld.ini.sample``
# container cannot or should not be used.
#use_volumes = True
- # To run containers in Docker Swarm mode on (an existing swarm), set the
- # following option to True *and*:
- # - set docker_connect_port above. For Nginx the # port should most likely be
- # 80.
- # - If command_inject is uncommented and includes `--sig-proxy`, that option should
- # be removed.
- #swarm_mode = False
-
You'll then need to create the GIE plugin YML file ``allowed_images.yml.sample``
to specify allowed images
.. code-block::
diff --git a/doc/source/project/issues.rst b/doc/source/project/issues.rst
index 993ec07adff3..b5539456fb83 100644
--- a/doc/source/project/issues.rst
+++ b/doc/source/project/issues.rst
@@ -110,6 +110,7 @@ particular domain, as well as more organized release notes.
- ``area/cleanup`` - General code cleanup
- ``area/client-build``
- ``area/compliance``
+- ``area/configuration`` - Galaxy's configuration system
- ``area/cwl`` - changes related to supporting the common workflow language in Galaxy
- ``area/database`` - Change requires a modification to Galaxy's database
- ``area/dataset-collections``
diff --git a/lib/galaxy/auth/__init__.py b/lib/galaxy/auth/__init__.py
index 066130db0b0c..644bcf49aa79 100644
--- a/lib/galaxy/auth/__init__.py
+++ b/lib/galaxy/auth/__init__.py
@@ -69,9 +69,7 @@ def check_auto_registration(self, trans, login, password, no_password_check=Fals
raise
return auth_return
elif auth_results[0] is None:
- auto_email = str(auth_results[1]).lower()
- auto_username = str(auth_results[2]).lower()
- log.debug("Email: %s, Username %s, stopping due to failed non-continue" % (auto_email, auto_username))
+ log.debug("Login: '%s', stopping due to failed non-continue", login)
break # end authentication (skip rest)
return auth_return
diff --git a/lib/galaxy/auth/providers/__init__.py b/lib/galaxy/auth/providers/__init__.py
index 514aa1b3eb81..b7176aa42ae3 100644
--- a/lib/galaxy/auth/providers/__init__.py
+++ b/lib/galaxy/auth/providers/__init__.py
@@ -33,9 +33,10 @@ def authenticate(self, email, username, password, options):
:param options: options provided in auth_config_file
:type options: dict
:returns: True: accept user, False: reject user and None: reject user
- and don't try any other providers. str, str is the email and
- username to register with if accepting
- :rtype: (bool, str, str)
+ and don't try any other providers. str, str are the email and
+ username to register with if accepting. The optional dict may
+ contain other attributes, e.g. roles to assign when autoregistering.
+ :rtype: (bool, str, str) or (bool, str, str, dict)
"""
@abc.abstractmethod
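As a rough illustration of the extended return value described in the docstring above, a provider may now return a fourth element carrying extra attributes. The class below is a hypothetical sketch (not one of Galaxy's shipped providers) showing only the return shape:

```python
class ExampleAuthProvider:
    """Hypothetical provider illustrating the (bool, str, str, dict) return shape."""

    plugin_type = 'example'

    def authenticate(self, email, username, password, options):
        if password != options.get('expected_password'):
            # Reject this user but allow other configured providers to be tried.
            return False, '', ''
        # Accept, and pass extra attributes (e.g. roles) used during auto-registration.
        return True, email, username, {'roles': ['example-role']}
```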
diff --git a/lib/galaxy/auth/providers/ldap_ad.py b/lib/galaxy/auth/providers/ldap_ad.py
index 27f74e98f81d..3a7f4299753e 100644
--- a/lib/galaxy/auth/providers/ldap_ad.py
+++ b/lib/galaxy/auth/providers/ldap_ad.py
@@ -7,6 +7,7 @@
import logging
from galaxy.exceptions import ConfigurationError
+from galaxy.security.validate_user_input import transform_publicname
from galaxy.util import string_as_bool
from ..providers import AuthProvider
@@ -39,7 +40,7 @@ def _parse_ldap_options(options_unparsed):
try:
key, value = opt.split("=")
except ValueError:
- log.warning("LDAP authenticate: Invalid syntax '%s' inside element. Syntax should be option1=value1,option2=value2" % opt)
+ log.warning("LDAP authenticate: Invalid syntax '%s' inside element. Syntax should be option1=value1,option2=value2", opt)
continue
if not key.startswith(prefix):
@@ -151,6 +152,8 @@ def ldap_search(self, email, username, options):
# setup search
attributes = [_.strip().format(**params)
for _ in options['search-fields'].split(',')]
+ if 'search-memberof-filter' in options:
+ attributes.append('memberOf')
suser = l.search_ext_s(_get_subs(options, 'search-base', params),
ldap.SCOPE_SUBTREE,
_get_subs(options, 'search-filter', params), attributes,
@@ -161,13 +164,15 @@ def ldap_search(self, email, username, options):
log.warning('LDAP authenticate: search returned no results')
return (failure_mode, None)
dn, attrs = suser[0]
- log.debug(("LDAP authenticate: dn is %s" % dn))
- log.debug(("LDAP authenticate: search attributes are %s" % attrs))
+ log.debug("LDAP authenticate: dn is %s", dn)
+ log.debug("LDAP authenticate: search attributes are %s", attrs)
if hasattr(attrs, 'has_key'):
for attr in attributes:
if self.role_search_attribute and attr == self.role_search_attribute[1:-1]: # strip brackets
# keep role names as list
params[self.role_search_option] = attrs[attr]
+ elif attr == 'memberOf':
+ params[attr] = attrs[attr]
elif attr in attrs:
params[attr] = str(attrs[attr][0])
else:
@@ -177,7 +182,6 @@ def ldap_search(self, email, username, options):
raise ConfigurationError("Missing or mismatching LDAP parameters for %s. Make sure the %s is "
"included in the 'search-fields'." %
(self.role_search_option, self.role_search_attribute))
- log.critical(params)
params['dn'] = dn
except Exception:
log.exception('LDAP authenticate: search exception')
@@ -190,10 +194,10 @@ def authenticate(self, email, username, password, options):
See abstract method documentation.
"""
if not options['redact_username_in_logs']:
- log.debug("LDAP authenticate: email is %s" % email)
- log.debug("LDAP authenticate: username is %s" % username)
+ log.debug("LDAP authenticate: email is %s", email)
+ log.debug("LDAP authenticate: username is %s", username)
- log.debug("LDAP authenticate: options are %s" % options)
+ log.debug("LDAP authenticate: options are %s", options)
failure_mode, params = self.ldap_search(email, username, options)
if not params:
@@ -205,19 +209,24 @@ def authenticate(self, email, username, password, options):
if not self._authenticate(params, options):
return failure_mode, '', ''
+ # check whether the user is a member of a specified group/domain/...
+ if 'search-memberof-filter' in options:
+ search_filter = _get_subs(options, 'search-memberof-filter', params)
+ if not any(search_filter in ad_node_name for ad_node_name in params['memberOf']):
+ return failure_mode, '', ''
+
attributes = {}
if self.auto_create_roles_or_groups:
attributes['roles'] = params[self.role_search_option]
return (True,
_get_subs(options, 'auto-register-email', params),
- _get_subs(options, 'auto-register-username', params),
+ transform_publicname(_get_subs(options, 'auto-register-username', params)),
attributes)
def _authenticate(self, params, options):
"""
Do the actual authentication by binding as the user to check their credentials
"""
- import ldap
try:
l = ldap.initialize(_get_subs(options, 'server', params))
l.protocol_version = 3
@@ -230,11 +239,10 @@ def _authenticate(self, params, options):
# The "Who am I?" extended operation is not supported by this LDAP server
pass
else:
- if not options['redact_username_in_logs']:
- log.debug("LDAP authenticate: whoami is %s", whoami)
-
if whoami is None:
raise RuntimeError('LDAP authenticate: anonymous bind')
+ if not options['redact_username_in_logs']:
+ log.debug("LDAP authenticate: whoami is %s", whoami)
except Exception:
log.warning('LDAP authenticate: bind exception', exc_info=True)
return False
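The new ``search-memberof-filter`` check added above boils down to a substring match against the DNs returned in the user's ``memberOf`` attribute. A minimal standalone sketch, with made-up group DNs:

```python
def is_member(member_of, search_filter):
    """Return True if any memberOf DN contains the configured filter string."""
    return any(search_filter in dn for dn in member_of)


# Hypothetical values for illustration only.
member_of = [
    'CN=galaxy-users,OU=Groups,DC=example,DC=org',
    'CN=staff,OU=Groups,DC=example,DC=org',
]
print(is_member(member_of, 'CN=galaxy-users'))  # True
print(is_member(member_of, 'CN=admins'))        # False
```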
diff --git a/lib/galaxy/auth/util.py b/lib/galaxy/auth/util.py
index 86bec13c6928..d99adc147b27 100644
--- a/lib/galaxy/auth/util.py
+++ b/lib/galaxy/auth/util.py
@@ -68,7 +68,6 @@ def get_authenticators(auth_config_file, auth_config_file_set):
def parse_auth_results(trans, auth_results, options):
auth_return = {}
auth_result, auto_email, auto_username = auth_results[:3]
- auto_email = str(auto_email).lower()
auto_username = str(auto_username).lower()
# make username unique
if validate_publicname(trans, auto_username) != '':
diff --git a/lib/galaxy/config/__init__.py b/lib/galaxy/config/__init__.py
index 4c40d61fdeef..bea76ef4d6bf 100644
--- a/lib/galaxy/config/__init__.py
+++ b/lib/galaxy/config/__init__.py
@@ -29,7 +29,6 @@
from galaxy.exceptions import ConfigurationError
from galaxy.model import mapping
from galaxy.model.tool_shed_install.migrate.check import create_or_verify_database as tsi_create_or_verify_database
-from galaxy.tool_util.deps.container_resolvers.mulled import DEFAULT_CHANNELS
from galaxy.util import (
ExecutionTimer,
listify,
@@ -55,18 +54,13 @@
GALAXY_APP_NAME = 'galaxy'
GALAXY_CONFIG_SCHEMA_PATH = 'lib/galaxy/webapps/galaxy/config_schema.yml'
LOGGING_CONFIG_DEFAULT = {
+ 'disable_existing_loggers': False,
'version': 1,
'root': {
'handlers': ['console'],
- 'level': 'INFO',
+ 'level': 'DEBUG',
},
'loggers': {
- 'galaxy': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': 0,
- 'qualname': 'galaxy',
- },
'paste.httpserver.ThreadPool': {
'level': 'WARN',
'qualname': 'paste.httpserver.ThreadPool',
@@ -75,6 +69,10 @@
'level': 'WARN',
'qualname': 'routes.middleware',
},
+ 'amqp': {
+ 'level': 'INFO',
+ 'qualname': 'amqp',
+ },
},
'filters': {
'stack': {
@@ -99,13 +97,6 @@
"""Default value for logging configuration, passed to :func:`logging.config.dictConfig`"""
-def resolve_path(path, root):
- """If 'path' is relative make absolute by prepending 'root'"""
- if not os.path.isabs(path):
- path = os.path.join(root, path)
- return path
-
-
def find_root(kwargs):
root = os.path.abspath(kwargs.get('root_dir', '.'))
return root
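Dropping the ``resolve_path`` helper above in favor of plain ``os.path.join`` relies on the fact that joining onto an absolute path simply returns that path, so relative values are anchored to the root while absolute values pass through unchanged. A quick check (POSIX paths assumed):

```python
import os

root = "/srv/galaxy"
# Relative paths are anchored to the root directory.
print(os.path.join(root, "config/tool_conf.xml"))       # /srv/galaxy/config/tool_conf.xml
# Absolute paths win and are returned unchanged.
print(os.path.join(root, "/etc/galaxy/tool_conf.xml"))  # /etc/galaxy/tool_conf.xml
```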
@@ -169,29 +160,31 @@ def _parse_config_file_options(self, defaults, listify_defaults, config_kwargs):
setattr(self, var + '_set', True)
else:
for value in values:
- if os.path.exists(resolve_path(value, self.root)):
+ if os.path.exists(os.path.join(self.root, value)):
path = value
break
else:
path = values[-1]
setattr(self, var + '_set', False)
- setattr(self, var, resolve_path(path, self.root))
+ setattr(self, var, os.path.join(self.root, path))
for var, values in listify_defaults.items():
paths = []
if config_kwargs.get(var, None) is not None:
paths = listify(config_kwargs.get(var))
+ setattr(self, var + '_set', True)
else:
for value in values:
for path in listify(value):
- if not os.path.exists(resolve_path(path, self.root)):
+ if not os.path.exists(os.path.join(self.root, path)):
break
else:
paths = listify(value)
break
else:
paths = listify(values[-1])
- setattr(self, var, [resolve_path(x, self.root) for x in paths])
+ setattr(self, var + '_set', False)
+ setattr(self, var, [os.path.join(self.root, x) for x in paths])
class GalaxyAppConfiguration(BaseAppConfiguration):
@@ -239,12 +232,6 @@ def _process_config(self, kwargs):
self.root = find_root(kwargs)
self._set_config_base(kwargs)
- # Configs no longer read from samples
- self.migrated_tools_config = resolve_path(kwargs.get('migrated_tools_config', 'migrated_tools_conf.xml'), self.mutable_config_dir)
- self.shed_tool_conf = resolve_path(kwargs.get('shed_tool_conf', 'shed_tool_conf.xml'), self.mutable_config_dir)
- for name in ('migrated_tools_config', 'shed_tool_conf'):
- setattr(self, name + '_set', kwargs.get(name, None) is not None)
-
# Resolve paths of other config files
self.parse_config_file_options(kwargs)
@@ -257,7 +244,7 @@ def _process_config(self, kwargs):
# Database related configuration
self.check_migrate_databases = kwargs.get('check_migrate_databases', True)
self.database_connection = kwargs.get("database_connection",
- "sqlite:///%s?isolation_level=IMMEDIATE" % resolve_path("universe.sqlite", self.data_dir))
+ "sqlite:///%s?isolation_level=IMMEDIATE" % os.path.join(self.data_dir, "universe.sqlite"))
self.database_engine_options = get_database_engine_options(kwargs)
self.database_create_tables = string_as_bool(kwargs.get("database_create_tables", "True"))
self.database_query_profiling_proxy = string_as_bool(kwargs.get("database_query_profiling_proxy", "False"))
@@ -267,38 +254,35 @@ def _process_config(self, kwargs):
self.thread_local_log = threading.local()
# Install database related configuration (if different).
- self.install_database_connection = kwargs.get("install_database_connection", None)
self.install_database_engine_options = get_database_engine_options(kwargs, model_prefix="install_")
# Where dataset files are stored
- self.file_path = resolve_path(kwargs.get("file_path", "files"), self.data_dir)
+ self.file_path = os.path.join(self.data_dir, self.file_path)
# new_file_path and legacy_home_dir can be overridden per destination in job_conf.
- self.new_file_path = resolve_path(kwargs.get("new_file_path", "tmp"), self.data_dir)
+ self.new_file_path = os.path.join(self.data_dir, self.new_file_path)
override_tempdir = string_as_bool(kwargs.get("override_tempdir", "True"))
if override_tempdir:
tempfile.tempdir = self.new_file_path
self.shared_home_dir = kwargs.get("shared_home_dir", None)
- self.openid_consumer_cache_path = resolve_path(kwargs.get("openid_consumer_cache_path", "openid_consumer_cache"), self.data_dir)
+ self.openid_consumer_cache_path = os.path.join(self.data_dir, self.openid_consumer_cache_path)
self.cookie_path = kwargs.get("cookie_path", None)
- self.tool_path = resolve_path(kwargs.get("tool_path", "tools"), self.root)
- self.tool_data_path = resolve_path(kwargs.get("tool_data_path", "tool-data"), os.getcwd())
+ self.tool_path = os.path.join(self.root, self.tool_path)
+ self.tool_data_path = os.path.join(self.root, self.tool_data_path)
if not running_from_source and kwargs.get("tool_data_path", None) is None:
- self.tool_data_path = resolve_path("tool-data", self.data_dir)
- self.builds_file_path = resolve_path(kwargs.get("builds_file_path", os.path.join(self.tool_data_path, 'shared', 'ucsc', 'builds.txt')), self.root)
- self.len_file_path = resolve_path(kwargs.get("len_file_path", os.path.join(self.tool_data_path, 'shared', 'ucsc', 'chrom')), self.root)
+ self.tool_data_path = os.path.join(self.data_dir, "tool-data")
+ self.builds_file_path = os.path.join(self.tool_data_path, self.builds_file_path)
+ self.len_file_path = os.path.join(self.tool_data_path, self.len_file_path)
# Galaxy OIDC settings.
self.oidc_config = kwargs.get("oidc_config_file", self.oidc_config_file)
self.oidc_backends_config = kwargs.get("oidc_backends_config_file", self.oidc_backends_config_file)
self.oidc = []
- # The value of migrated_tools_config is the file reserved for containing only those tools that have been eliminated from the distribution
- # and moved to the tool shed. It is created on demand.
- self.integrated_tool_panel_config = resolve_path(kwargs.get('integrated_tool_panel_config', 'integrated_tool_panel.xml'), self.mutable_config_dir)
+ self.integrated_tool_panel_config = os.path.join(self.mutable_config_dir, self.integrated_tool_panel_config)
integrated_tool_panel_tracking_directory = kwargs.get('integrated_tool_panel_tracking_directory', None)
if integrated_tool_panel_tracking_directory:
- self.integrated_tool_panel_tracking_directory = resolve_path(integrated_tool_panel_tracking_directory, self.root)
+ self.integrated_tool_panel_tracking_directory = os.path.join(self.root, integrated_tool_panel_tracking_directory)
else:
self.integrated_tool_panel_tracking_directory = None
- self.toolbox_filter_base_modules = listify(kwargs.get("toolbox_filter_base_modules", "galaxy.tools.filters,galaxy.tools.toolbox.filters"))
+ self.toolbox_filter_base_modules = listify(self.toolbox_filter_base_modules)
self.tool_filters = listify(kwargs.get("tool_filters", []), do_strip=True)
self.tool_label_filters = listify(kwargs.get("tool_label_filters", []), do_strip=True)
self.tool_section_filters = listify(kwargs.get("tool_section_filters", []), do_strip=True)
@@ -308,16 +292,16 @@ def _process_config(self, kwargs):
self.user_tool_section_filters = listify(kwargs.get("user_tool_section_filters", []), do_strip=True)
self.has_user_tool_filters = bool(self.user_tool_filters or self.user_tool_label_filters or self.user_tool_section_filters)
- self.tour_config_dir = resolve_path(kwargs.get("tour_config_dir", "config/plugins/tours"), self.root)
- self.webhooks_dirs = resolve_path(kwargs.get("webhooks_dir", "config/plugins/webhooks"), self.root)
+ self.tour_config_dir = os.path.join(self.root, self.tour_config_dir)
+ self.webhooks_dirs = os.path.join(self.root, self.webhooks_dir)
self.password_expiration_period = timedelta(days=int(kwargs.get("password_expiration_period", 0)))
- self.shed_tool_data_path = kwargs.get("shed_tool_data_path", None)
if self.shed_tool_data_path:
- self.shed_tool_data_path = resolve_path(self.shed_tool_data_path, self.root)
+ self.shed_tool_data_path = os.path.join(self.root, self.shed_tool_data_path)
else:
self.shed_tool_data_path = self.tool_data_path
+
self.running_functional_tests = string_as_bool(kwargs.get('running_functional_tests', False))
self.enable_tool_shed_check = string_as_bool(kwargs.get('enable_tool_shed_check', False))
if isinstance(self.hours_between_check, string_types):
@@ -350,21 +334,20 @@ def _process_config(self, kwargs):
for ip in kwargs.get("fetch_url_whitelist", "").split(',')
if len(ip.strip()) > 0
]
- self.template_path = resolve_path(kwargs.get("template_path", "templates"), self.root)
- self.template_cache = resolve_path(kwargs.get("template_cache_path", "compiled_templates"), self.data_dir)
+ self.template_path = os.path.join(self.root, kwargs.get("template_path", "templates"))
+ self.template_cache = os.path.join(self.data_dir, self.template_cache_path)
self.job_queue_cleanup_interval = int(kwargs.get("job_queue_cleanup_interval", "5"))
- self.cluster_files_directory = self.resolve_path(kwargs.get("cluster_files_directory", os.path.join(self.data_dir, "pbs")))
+ self.cluster_files_directory = self.resolve_path(os.path.join(self.data_dir, self.cluster_files_directory))
# Fall back to legacy job_working_directory config variable if set.
- default_jobs_directory = kwargs.get("job_working_directory", "jobs_directory")
- self.jobs_directory = resolve_path(kwargs.get("jobs_directory", default_jobs_directory), self.data_dir)
+ self.jobs_directory = os.path.join(self.data_dir, kwargs.get("jobs_directory", self.job_working_directory))
if self.preserve_python_environment not in ["legacy_only", "legacy_and_local", "always"]:
log.warning("preserve_python_environment set to unknown value [%s], defaulting to legacy_only")
self.preserve_python_environment = "legacy_only"
self.nodejs_path = kwargs.get("nodejs_path", None)
# Older default container cache path, I don't think anyone is using it anymore and it wasn't documented - we
# should probably drop the backward compatiblity to save the path check.
- self.container_image_cache_path = resolve_path(kwargs.get("container_image_cache_path", "container_images"), self.data_dir)
+ self.container_image_cache_path = os.path.join(self.data_dir, kwargs.get("container_image_cache_path", "container_images"))
if not os.path.exists(self.container_image_cache_path):
self.container_image_cache_path = self.resolve_path(kwargs.get("container_image_cache_path", os.path.join(self.data_dir, "container_cache")))
self.output_size_limit = int(kwargs.get('output_size_limit', 0))
@@ -372,18 +355,17 @@ def _process_config(self, kwargs):
activation_email = kwargs.get('activation_email', None)
self.email_from = kwargs.get('email_from', activation_email)
self.myexperiment_target_url = kwargs.get('my_experiment_target_url', 'www.myexperiment.org')
- self.instance_resource_url = kwargs.get('instance_resource_url', None)
+
# Get the disposable email domains blacklist file and its contents
- self.blacklist_location = kwargs.get('blacklist_file', None)
self.blacklist_content = None
- if self.blacklist_location is not None:
- self.blacklist_file = resolve_path(kwargs.get('blacklist_file', None), self.root)
+ if self.blacklist_file:
+ self.blacklist_file = os.path.join(self.root, self.blacklist_file)
try:
- with open(self.blacklist_file) as blacklist:
- self.blacklist_content = [line.rstrip() for line in blacklist.readlines()]
+ with open(self.blacklist_file) as f:
+ self.blacklist_content = [line.rstrip() for line in f]
except IOError:
log.error("CONFIGURATION ERROR: Can't open supplied blacklist file from path: " + str(self.blacklist_file))
- self.smtp_ssl = kwargs.get('smtp_ssl', None)
+
self.persistent_communication_rooms = listify(kwargs.get("persistent_communication_rooms", []), do_strip=True)
# The transfer manager and deferred job queue
self.enable_beta_job_managers = string_as_bool(kwargs.get('enable_beta_job_managers', 'False'))
@@ -404,11 +386,7 @@ def _process_config(self, kwargs):
self.pbs_dataset_server = kwargs.get('pbs_dataset_server', "")
self.pbs_dataset_path = kwargs.get('pbs_dataset_path', "")
self.pbs_stage_path = kwargs.get('pbs_stage_path', "")
- self.drmaa_external_runjob_script = kwargs.get('drmaa_external_runjob_script', None)
- self.drmaa_external_killjob_script = kwargs.get('drmaa_external_killjob_script', None)
- self.external_chown_script = kwargs.get('external_chown_script', None)
- self.heartbeat_log = kwargs.get('heartbeat_log', None)
- self.sanitize_whitelist_file = resolve_path(kwargs.get('sanitize_whitelist_file', "config/sanitize_whitelist.txt"), self.root)
+ self.sanitize_whitelist_file = os.path.join(self.root, self.sanitize_whitelist_file)
self.allowed_origin_hostnames = self._parse_allowed_origin_hostnames(kwargs)
if "trust_jupyter_notebook_conversion" in kwargs:
trust_jupyter_notebook_conversion = string_as_bool(kwargs.get('trust_jupyter_notebook_conversion', False))
@@ -430,7 +408,7 @@ def _process_config(self, kwargs):
# not needed on production systems but useful if running many functional tests.
self.index_tool_help = string_as_bool(kwargs.get("index_tool_help", True))
self.tool_labels_boost = kwargs.get("tool_labels_boost", 1)
- default_tool_test_data_directories = os.environ.get("GALAXY_TEST_FILE_DIR", resolve_path("test-data", self.root))
+ default_tool_test_data_directories = os.environ.get("GALAXY_TEST_FILE_DIR", os.path.join(self.root, "test-data"))
self.tool_test_data_directories = kwargs.get("tool_test_data_directories", default_tool_test_data_directories)
# Deployers may either specify a complete list of mapping files or get the default for free and just
# specify a local mapping file to adapt and extend the default one.
@@ -446,22 +424,18 @@ def _process_config(self, kwargs):
self.enable_mulled_containers = string_as_bool(kwargs.get('enable_mulled_containers', 'True'))
containers_resolvers_config_file = kwargs.get('containers_resolvers_config_file', None)
if containers_resolvers_config_file:
- containers_resolvers_config_file = resolve_path(containers_resolvers_config_file, self.root)
+ containers_resolvers_config_file = os.path.join(self.root, containers_resolvers_config_file)
self.containers_resolvers_config_file = containers_resolvers_config_file
- involucro_path = kwargs.get('involucro_path', None)
- if involucro_path is None:
- target_dir = kwargs.get("tool_dependency_dir", "dependencies")
- if target_dir == "none":
- target_dir = "dependencies"
- target_dir = resolve_path(target_dir, self.data_dir)
- involucro_path = os.path.join(target_dir, "involucro")
- self.involucro_path = resolve_path(involucro_path, self.root)
- mulled_channels = kwargs.get('mulled_channels')
- if mulled_channels:
- self.mulled_channels = [c.strip() for c in mulled_channels.split(',')]
- else:
- self.mulled_channels = DEFAULT_CHANNELS
+ # tool_dependency_dir can be "none" (in old configs). If so, set it to schema default
+ if self.tool_dependency_dir and self.tool_dependency_dir.lower() == 'none':
+ self.tool_dependency_dir = None
+ if self.involucro_path is None:
+ target_dir = self.tool_dependency_dir or self.appschema['tool_dependency_dir'].get('default')
+ self.involucro_path = os.path.join(self.data_dir, target_dir, "involucro")
+ self.involucro_path = os.path.join(self.root, self.involucro_path)
+ if self.mulled_channels:
+ self.mulled_channels = [c.strip() for c in self.mulled_channels.split(',')]
default_job_resubmission_condition = kwargs.get('default_job_resubmission_condition', '')
if not default_job_resubmission_condition.strip():
@@ -493,7 +467,7 @@ def _process_config(self, kwargs):
self.object_store_cache_size = float(kwargs.get('object_store_cache_size', -1))
self.distributed_object_store_config_file = kwargs.get('distributed_object_store_config_file', None)
if self.distributed_object_store_config_file is not None:
- self.distributed_object_store_config_file = resolve_path(self.distributed_object_store_config_file, self.root)
+ self.distributed_object_store_config_file = os.path.join(self.root, self.distributed_object_store_config_file)
self.irods_root_collection_path = kwargs.get('irods_root_collection_path', None)
self.irods_default_resource = kwargs.get('irods_default_resource', None)
# Heartbeat log file name override
@@ -559,7 +533,7 @@ def _process_config(self, kwargs):
elif 'database_connection' in kwargs:
self.amqp_internal_connection = "sqlalchemy+" + self.database_connection
else:
- self.amqp_internal_connection = "sqlalchemy+sqlite:///%s?isolation_level=IMMEDIATE" % resolve_path("control.sqlite", self.data_dir)
+ self.amqp_internal_connection = "sqlalchemy+sqlite:///%s?isolation_level=IMMEDIATE" % os.path.join(self.data_dir, "control.sqlite")
self.pretty_datetime_format = expand_pretty_datetime_format(kwargs.get('pretty_datetime_format', '$locale (UTC)'))
try:
with open(self.user_preferences_extra_conf_path, 'r') as stream:
@@ -582,16 +556,12 @@ def _process_config(self, kwargs):
# Statistics and profiling with statsd
self.statsd_host = kwargs.get('statsd_host', '')
- ie_dirs = kwargs.get('interactive_environment_plugins_directory', None)
+ ie_dirs = self.interactive_environment_plugins_directory
self.gie_dirs = [d.strip() for d in (ie_dirs.split(",") if ie_dirs else [])]
- if ie_dirs and not self.visualization_plugins_directory:
- self.visualization_plugins_directory = ie_dirs
- elif ie_dirs:
+ if ie_dirs:
self.visualization_plugins_directory += ",%s" % ie_dirs
- self.gie_swarm_mode = string_as_bool(kwargs.get('interactive_environment_swarm_mode', False))
-
- self.proxy_session_map = resolve_path(kwargs.get("dynamic_proxy_session_map", "session_map.sqlite"), self.data_dir)
+ self.proxy_session_map = os.path.join(self.data_dir, self.dynamic_proxy_session_map)
self.manage_dynamic_proxy = string_as_bool(kwargs.get("dynamic_proxy_manage", "True")) # Set to false if being launched externally
# InteractiveTools propagator mapping file
@@ -599,8 +569,8 @@ def _process_config(self, kwargs):
self.interactivetool_prefix = kwargs.get("interactivetools_prefix", "interactivetool")
self.interactivetools_enable = string_as_bool(kwargs.get('interactivetools_enable', False))
- self.citation_cache_data_dir = resolve_path(kwargs.get("citation_cache_data_dir", "citations/data"), self.data_dir)
- self.citation_cache_lock_dir = resolve_path(kwargs.get("citation_cache_lock_dir", "citations/locks"), self.data_dir)
+ self.citation_cache_data_dir = os.path.join(self.data_dir, self.citation_cache_data_dir)
+ self.citation_cache_lock_dir = os.path.join(self.data_dir, self.citation_cache_lock_dir)
self.containers_conf = parse_containers_config(self.containers_config_file)
@@ -705,6 +675,7 @@ def parse_config_file_options(self, kwargs):
oidc_backends_config_file=[self._in_config_dir('oidc_backends_config.yml')],
oidc_config_file=[self._in_config_dir('oidc_config.yml')],
shed_data_manager_config_file=[self._in_mutable_config_dir('shed_data_manager_conf.xml')],
+ shed_tool_config_file=[self._in_mutable_config_dir('shed_tool_conf.xml')],
shed_tool_data_table_config=[self._in_mutable_config_dir('shed_tool_data_table_conf.xml')],
tool_destinations_config_file=[self._in_config_dir('tool_destinations.yml')],
tool_sheds_config_file=[self._in_config_dir('tool_sheds_conf.xml')],
@@ -712,44 +683,17 @@ def parse_config_file_options(self, kwargs):
workflow_resource_params_file=[self._in_config_dir('workflow_resource_params_conf.xml')],
workflow_schedulers_config_file=[self._in_config_dir('config/workflow_schedulers_conf.xml')],
)
- if running_from_source:
- listify_defaults = {
- 'tool_data_table_config_path': ['config/tool_data_table_conf.xml',
- 'tool_data_table_conf.xml',
- 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'],
- # rationale:
- # [0]: user has explicitly created config/tool_conf.xml but did not
- # move their existing shed_tool_conf.xml, don't use
- # config/shed_tool_conf.xml, which is probably the empty
- # version copied from the sample, or else their shed tools
- # will disappear
- # [1]: user has created config/tool_conf.xml and, having passed
- # [0], probably moved their shed_tool_conf.xml as well
- # [2]: user has done nothing, use the old files
- # [3]: fresh install (shed_tool_conf will be added later)
- 'tool_config_file': ['config/tool_conf.xml,shed_tool_conf.xml',
- 'config/tool_conf.xml,config/shed_tool_conf.xml',
- 'tool_conf.xml,shed_tool_conf.xml',
- 'lib/galaxy/config/sample/tool_conf.xml.sample']
- }
- else:
- listify_defaults = {
- 'tool_data_table_config_path': [
- self._in_config_dir('tool_data_table_conf.xml'),
- self._in_sample_dir('tool_data_table_conf.xml.sample')],
- 'tool_config_file': [
- self._in_config_dir('tool_conf.xml'),
- self._in_sample_dir('tool_conf.xml.sample')]
- }
+ listify_defaults = {
+ 'tool_data_table_config_path': [
+ self._in_config_dir('tool_data_table_conf.xml'),
+ self._in_sample_dir('tool_data_table_conf.xml.sample')],
+ 'tool_config_file': [
+ self._in_config_dir('tool_conf.xml'),
+ self._in_sample_dir('tool_conf.xml.sample')]
+ }
self._parse_config_file_options(defaults, listify_defaults, kwargs)
- # If the user has configured a shed tool config in tool_config_file
- # this would add a second, but since we're not parsing them yet we
- # don't know if that's the case.
- if not running_from_source and self.shed_tool_conf not in self.tool_config_file:
- self.tool_config_file.append(self.shed_tool_conf)
-
# Backwards compatibility for names used in too many places to fix
self.datatypes_config = self.datatypes_config_file
self.tool_configs = self.tool_config_file
@@ -765,7 +709,7 @@ def reload_sanitize_whitelist(self, explicit=True):
if explicit:
log.warning("Sanitize log file explicitly specified as '%s' but does not exist, continuing with no tools whitelisted.", self.sanitize_whitelist_file)
- def get(self, key, default):
+ def get(self, key, default=None):
return self.config_dict.get(key, default)
def get_bool(self, key, default):
@@ -800,10 +744,8 @@ def check(self):
self._ensure_directory(path)
# Check that required files exist
tool_configs = self.tool_configs
- if self.migrated_tools_config not in tool_configs and os.path.exists(self.migrated_tools_config):
- tool_configs.append(self.migrated_tools_config)
for path in tool_configs:
- if not os.path.exists(path) and path != self.shed_tool_conf:
+ if not os.path.exists(path) and path not in (self.shed_tool_config_file, self.migrated_tools_config):
raise ConfigurationError("Tool config file not found: %s" % path)
for datatypes_config in listify(self.datatypes_config):
if not os.path.isfile(datatypes_config):
@@ -825,7 +767,7 @@ def is_admin_user(self, user):
def resolve_path(self, path):
""" Resolve a path relative to Galaxy's root.
"""
- return resolve_path(path, self.root)
+ return os.path.join(self.root, path)
@staticmethod
def _parse_allowed_origin_hostnames(kwargs):
@@ -960,12 +902,15 @@ def _configure_genome_builds(self, data_table_name="__dbkeys__", load_old_style=
def wait_for_toolbox_reload(self, old_toolbox):
timer = ExecutionTimer()
- while True:
- # Wait till toolbox reload has been triggered
- # (or more than 60 seconds have passed)
- if self.toolbox.has_reloaded(old_toolbox) or timer.elapsed > 60:
+ log.debug('Waiting for toolbox reload')
+ # Wait till toolbox reload has been triggered (or more than 60 seconds have passed)
+ while timer.elapsed < 60:
+ if self.toolbox.has_reloaded(old_toolbox):
+ log.debug('Finished waiting for toolbox reload %s', timer)
break
time.sleep(0.1)
+ else:
+ log.warning('Waiting for toolbox reload timed out after 60 seconds')
def _configure_toolbox(self):
from galaxy import tools
@@ -979,9 +924,26 @@ def _configure_toolbox(self):
from galaxy.managers.tools import DynamicToolManager
self.dynamic_tools_manager = DynamicToolManager(self)
self._toolbox_lock = threading.RLock()
- # Initialize the tools, making sure the list of tool configs includes the reserved migrated_tools_conf.xml file.
+ # Initialize the tools, making sure the list of tool configs includes automatically generated dynamic
+ # (shed-enabled) tool configs, which are created on demand.
tool_configs = self.config.tool_configs
- if self.config.migrated_tools_config not in tool_configs:
+ # If the user has configured a shed tool config in tool_config_file this would add a second, but since we're not
+ # parsing them yet we don't know if that's the case. We'll assume that the standard shed_tool_conf.xml location
+ # is in use, and warn if we suspect there to be problems.
+ if self.config.shed_tool_config_file not in tool_configs:
+ # This seems like the likely case for problems in older deployments
+ if self.config.tool_config_file_set and not self.config.shed_tool_config_file_set:
+ log.warning(
+                    "The default shed tool config file (%s) has been added to the tool_config_file option; if this is "
+                    "not the desired behavior, please set shed_tool_config_file to your primary shed-enabled tool "
+                    "config file", self.config.shed_tool_config_file
+                )
+ tool_configs.append(self.config.shed_tool_config_file)
+ # The value of migrated_tools_config is the file reserved for containing only those tools that have been
+ # eliminated from the distribution and moved to the tool shed. If migration checking is disabled, only add it if
+ # it exists (since this may be an existing deployment where migrations were previously run).
+ if ((self.config.check_migrate_tools or os.path.exists(self.config.migrated_tools_config))
+ and self.config.migrated_tools_config not in tool_configs):
tool_configs.append(self.config.migrated_tools_config)
self.toolbox = tools.ToolBox(tool_configs, self.config.tool_path, self)
galaxy_root_dir = os.path.abspath(self.config.root)
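The toolbox setup above appends the shed tool config and, conditionally, the migrated tools config to the list of parsed tool configs. A simplified, standalone restatement of that decision logic, assuming plain strings and booleans rather than the real config object:

```python
import os


def effective_tool_configs(tool_configs, shed_tool_config_file, migrated_tools_config, check_migrate_tools):
    """Sketch of which tool config files end up being parsed by the toolbox."""
    configs = list(tool_configs)
    # Always make sure the (auto-created) shed tool config is included.
    if shed_tool_config_file not in configs:
        configs.append(shed_tool_config_file)
    # migrated_tools_conf.xml is only added if migration checking is on
    # or the file already exists from a previous deployment.
    if (check_migrate_tools or os.path.exists(migrated_tools_config)) and migrated_tools_config not in configs:
        configs.append(migrated_tools_config)
    return configs
```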
diff --git a/lib/galaxy/config/config_manage.py b/lib/galaxy/config/config_manage.py
index fc1a19489b97..0c84b391b8f1 100644
--- a/lib/galaxy/config/config_manage.py
+++ b/lib/galaxy/config/config_manage.py
@@ -15,6 +15,7 @@
import requests
import six
import yaml
+from boltons.iterutils import remap
from six import StringIO
try:
@@ -47,8 +48,8 @@
MISSING_FILTER_TYPE_MESSAGE = "Missing filter type for section [%s], it will be ignored."
UNHANDLED_FILTER_TYPE_MESSAGE = "Unhandled filter type encountered [%s] for section [%s]."
NO_APP_MAIN_MESSAGE = "No app:main section found, using application defaults throughout."
-YAML_COMMENT_WRAPPER = TextWrapper(initial_indent="# ", subsequent_indent="# ")
-RST_DESCRIPTION_WRAPPER = TextWrapper(initial_indent=" ", subsequent_indent=" ")
+YAML_COMMENT_WRAPPER = TextWrapper(initial_indent="# ", subsequent_indent="# ", break_long_words=False)
+RST_DESCRIPTION_WRAPPER = TextWrapper(initial_indent=" ", subsequent_indent=" ", break_long_words=False)
UWSGI_SCHEMA_PATH = "lib/galaxy/webapps/uwsgi_schema.yml"
App = namedtuple(
@@ -396,8 +397,9 @@ def _write_option_rst(args, rst, key, heading_level, option_value):
option, value = _parse_option_value(option_value)
desc = option["desc"]
rst.write(":Description:\n")
- rst.write("\n".join(RST_DESCRIPTION_WRAPPER.wrap(desc)))
- rst.write("\n")
+ # Wrap and indent desc, replacing whitespaces with a space, except
+ # for double newlines which are replaced with a single newline.
+ rst.write("\n".join("\n".join(RST_DESCRIPTION_WRAPPER.wrap(_)) for _ in desc.split("\n\n")) + "\n")
type = option.get("type", None)
default = option.get("default", "*null*")
if default is True:
@@ -505,13 +507,18 @@ def _validate(args, app_desc):
raw_config = _order_load_path(path)
if raw_config.get(app_desc.app_name, None) is None:
raw_config[app_desc.app_name] = {}
- config_p = tempfile.NamedTemporaryFile(delete=False, suffix=".yml")
+ config_p = tempfile.NamedTemporaryFile('w', delete=False, suffix=".yml")
ordered_dump(raw_config, config_p)
config_p.flush()
path = config_p.name
- fp = tempfile.NamedTemporaryFile(delete=False, suffix=".yml")
- ordered_dump(app_desc.schema.raw_schema, fp)
+ fp = tempfile.NamedTemporaryFile('w', delete=False, suffix=".yml")
+
+ def _clean(p, k, v):
+ return k != 'reloadable'
+
+ clean_schema = remap(app_desc.schema.raw_schema, _clean)
+ ordered_dump(clean_schema, fp)
fp.flush()
name = fp.name
if Core is None:
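The ``remap`` call above strips every ``reloadable`` key from the nested schema before it is handed to the validator, since that key is Galaxy-specific and not valid schema syntax. A small self-contained example of the same idiom, with an illustrative (made-up) schema fragment:

```python
from boltons.iterutils import remap

schema = {
    "mapping": {
        "message_box_content": {"type": "str", "reloadable": True},
        "brand": {"type": "str"},
    }
}

# Drop 'reloadable' keys anywhere in the nested structure; everything else is kept.
cleaned = remap(schema, lambda path, key, value: key != 'reloadable')
print(cleaned)
# {'mapping': {'message_box_content': {'type': 'str'}, 'brand': {'type': 'str'}}}
```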
@@ -717,8 +724,9 @@ def _write_option(args, f, key, option_value, as_comment=False, uwsgi_hack=False
desc = option["desc"]
comment = ""
if desc and args.add_comments:
- comment = "\n".join(YAML_COMMENT_WRAPPER.wrap(desc))
- comment += "\n"
+ # Wrap and comment desc, replacing whitespaces with a space, except
+ # for double newlines which are replaced with a single newline.
+ comment += "\n".join("\n".join(YAML_COMMENT_WRAPPER.wrap(_)) for _ in desc.split("\n\n")) + "\n"
as_comment_str = "#" if as_comment else ""
if uwsgi_hack:
if option.get("type", "str") == "bool":
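The wrapping change above preserves paragraph breaks by splitting the description on blank lines and wrapping each paragraph separately, with ``break_long_words=False`` so long URLs stay on one line. A minimal demonstration of the same expression, using an illustrative description string:

```python
from textwrap import TextWrapper

# Same wrapper settings as above; break_long_words=False keeps long URLs intact.
wrapper = TextWrapper(initial_indent="# ", subsequent_indent="# ", break_long_words=False)

desc = (
    "See https://docs.galaxyproject.org/en/master/admin/scaling.html "
    "for more information on these options.\n\n"
    "This only affects tools where some requirements can be resolved but not others."
)

# Each paragraph (split on blank lines) is wrapped on its own, so the
# double newline in the source collapses to a single newline in the output.
print("\n".join("\n".join(wrapper.wrap(p)) for p in desc.split("\n\n")))
```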
diff --git a/lib/galaxy/config/sample/auth_conf.xml.sample b/lib/galaxy/config/sample/auth_conf.xml.sample
index 34d30693b727..016279a2d1eb 100644
--- a/lib/galaxy/config/sample/auth_conf.xml.sample
+++ b/lib/galaxy/config/sample/auth_conf.xml.sample
@@ -76,6 +76,12 @@
-->
+
+
@@ -781,6 +783,8 @@
+
+
diff --git a/lib/galaxy/config/sample/galaxy.yml.sample b/lib/galaxy/config/sample/galaxy.yml.sample
index 04d10e29e1b1..144e2b8da401 100644
--- a/lib/galaxy/config/sample/galaxy.yml.sample
+++ b/lib/galaxy/config/sample/galaxy.yml.sample
@@ -101,20 +101,22 @@ galaxy:
# The directory that will be prepended to relative paths in options
# specifying other Galaxy config files (e.g. datatypes_config_file).
# Defaults to the directory in which galaxy.yml is located.
- #config_dir: 'false'
+ #config_dir: null
# The directory that will be prepended to relative paths in options
# specifying Galaxy data/cache directories and files (such as the
# default SQLite database, file_path, etc.). Defaults to `database/`
# if running Galaxy from source or `/data` otherwise.
- #data_dir: 'false'
+ #data_dir: null
# By default, Galaxy uses a SQLite database at
# 'database/universe.sqlite'. You may use a SQLAlchemy connection
# string to specify an external database instead. This string takes
# many options which are explained in detail in the config file
# documentation.
- #database_connection: sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
+ # Sample default
+  # 'sqlite:///<data_dir>/universe.sqlite?isolation_level=IMMEDIATE'
+ #database_connection: null
# If the server logs errors about not having enough database pool
# connections, you will want to increase these values, or consider
@@ -147,13 +149,13 @@ galaxy:
# an existing template database. This will set that. This is probably
# only useful for testing but documentation is included here for
# completeness.
- #database_template: ''
+ #database_template: null
# Slow query logging. Queries slower than the threshold indicated
# below will be logged to debug. A value of '0' is disabled. For
# example, you would set this to .005 to log all queries taking longer
# than 5 milliseconds.
- #slow_query_log_threshold: 0
+ #slow_query_log_threshold: 0.0
# Enables a per request sql debugging option. If this is set to true,
# append ?sql_debug=1 to web request URLs to enable detailed logging
@@ -167,7 +169,8 @@ galaxy:
# instances with pretested installs. The following option can be used
# to separate the tool shed install database (all other options listed
# above but prefixed with install_ are also available).
- #install_database_connection: sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
+ # Defaults to the value of the 'database_connection' option.
+ #install_database_connection: null
# Setting the following option to true will cause Galaxy to
# automatically migrate the database forward after updates. This is
@@ -185,20 +188,38 @@ galaxy:
# seconds).
#database_wait_sleep: 1.0
- # Where dataset files are stored. It must accessible at the same path
- # on any cluster nodes that will run Galaxy jobs, unless using Pulsar.
- #file_path: database/files
+ # Where dataset files are stored. It must be accessible at the same
+ # path on any cluster nodes that will run Galaxy jobs, unless using
+ # Pulsar.
+ # Default value will be resolved to 'database/files' where 'database'
+  # is the default value of the 'data_dir' option.
+ #file_path: files
- # Where temporary files are stored. It must accessible at the same
+ # Where temporary files are stored. It must be accessible at the same
# path on any cluster nodes that will run Galaxy jobs, unless using
# Pulsar.
- #new_file_path: database/tmp
+ # Default value will be resolved to 'database/tmp' where 'database' is
+  # the default value of the 'data_dir' option.
+ #new_file_path: tmp
# Tool config files, defines what tools are available in Galaxy. Tools
# can be locally developed or installed from Galaxy tool sheds.
# (config/tool_conf.xml.sample will be used if left unset and
- # config/tool_conf.xml does not exist).
- #tool_config_file: config/tool_conf.xml,config/shed_tool_conf.xml
+ # config/tool_conf.xml does not exist). Can be a single file, a list
+ # of files, or (for backwards compatibility) a comma-separated list of
+ # files.
+ #tool_config_file: config/tool_conf.xml
+
+ # Tool config file for tools installed from the Galaxy Tool Shed. Must
+ # be writable by Galaxy and generally should not be edited by hand. In
+ # older Galaxy releases, this file was part of the tool_config_file
+ # option. It is still possible to specify this file (and other shed-
+ # enabled tool config files) in tool_config_file, but in the standard
+ # case of a single shed-enabled tool config, this option is
+ # preferable. This file will be created automatically upon tool
+ # installation, whereas Galaxy will fail to start if any files in
+ # tool_config_file cannot be read.
+ #shed_tool_config_file: config/shed_tool_conf.xml
# Enable / disable checking if any tools defined in the above non-shed
# tool_config_files (i.e., tool_conf.xml) have been migrated from the
@@ -212,7 +233,9 @@ galaxy:
# migration scripts to install tools that have been migrated to the
# tool shed upon a new release, they will be added to this tool config
# file.
- #migrated_tools_config: config/migrated_tools_conf.xml
+ # Default value will be resolved to 'config/migrated_tools_conf.xml'
+  # where 'config' is the default value of the 'config_dir' option.
+ #migrated_tools_config: migrated_tools_conf.xml
# File that contains the XML section and tool tags from all tool panel
# config files integrated into a single file that defines the tool
@@ -229,26 +252,28 @@ galaxy:
# Various dependency resolver configuration parameters will have
# defaults set relative to this path, such as the default conda
# prefix, default Galaxy packages path, legacy tool shed dependencies
- # path, and the dependency cache directory. Set the string to None to
- # explicitly disable tool dependency handling. If this option is set
- # to none or an invalid path, installing tools with dependencies from
- # the Tool Shed or in Conda will fail.
- #tool_dependency_dir: database/dependencies
+ # path, and the dependency cache directory.
+ # Set the string to null to explicitly disable tool dependency
+ # handling. If this option is set to none or an invalid path,
+ # installing tools with dependencies from the Tool Shed or in Conda
+ # will fail.
+ # Default value will be resolved to 'database/dependencies' where
+  # 'database' is the default value of the 'data_dir' option.
+ #tool_dependency_dir: dependencies
# The dependency resolvers config file specifies an ordering and
# options for how Galaxy resolves tool dependencies (requirement tags
# in Tool XML). The default ordering is to the use the Tool Shed for
# tools installed that way, use local Galaxy packages, and then use
- # Conda if available. See https://github.com/galaxyproject/galaxy/blob
- # /dev/doc/source/admin/dependency_resolvers.rst for more information
- # on these options.
+ # Conda if available. See
+ # https://github.com/galaxyproject/galaxy/blob/dev/doc/source/admin/dependency_resolvers.rst
+ # for more information on these options.
#dependency_resolvers_config_file: config/dependency_resolvers_conf.xml
# conda_prefix is the location on the filesystem where Conda packages
- # and environments are installed IMPORTANT: Due to a current
- # limitation in conda, the total length of the conda_prefix and the
- # job_working_directory path should be less than 50 characters!
-  #conda_prefix: <tool_dependency_dir>/_conda
+ # and environments are installed.
+  # Sample default '<tool_dependency_dir>/_conda'
+ #conda_prefix: null
# Override the Conda executable to use, it will default to the one on
# the PATH (if available) and then to /bin/conda
@@ -287,14 +312,16 @@ galaxy:
# share. Set this option to true to cache the dependencies in a
# folder. This option is beta and should only be used if you
# experience long waiting times before a job is actually submitted to
- # your cluster. This only affects tools where some requirements can
- # be resolved but not others, most modern best practice tools can use
- # prebuilt environments in the Conda directory.
+ # your cluster.
+ # This only affects tools where some requirements can be resolved but
+ # not others, most modern best practice tools can use prebuilt
+ # environments in the Conda directory.
#use_cached_dependency_manager: false
# By default the tool_dependency_cache_dir is the _cache directory of
- # the tool dependency directory
-  #tool_dependency_cache_dir: <tool_dependency_dir>/_cache
+ # the tool dependency directory.
+  # Sample default '<tool_dependency_dir>/_cache'
+ #tool_dependency_cache_dir: null
# By default, when using a cached dependency manager, the dependencies
# are cached when installing new tools and when using tools for the
@@ -355,7 +382,7 @@ galaxy:
# container resolvers to use when discovering containers for Galaxy.
# If this is set to None, the default containers loaded is determined
# by enable_mulled_containers.
- #containers_resolvers_config_file: ''
+ #containers_resolvers_config_file: null
# involucro is a tool used to build Docker or Singularity containers
# for tools from Conda dependencies referenced in tools as
@@ -363,7 +390,8 @@ galaxy:
# the Galaxy host. This is ignored if the relevant container resolver
# isn't enabled, and will install on demand unless involucro_auto_init
# is set to false.
- #involucro_path: database/dependencies/involucro
+  # Sample default '<tool_dependency_dir>/involucro'
+ #involucro_path: null
# Install involucro as needed to build Docker or Singularity
# containers for tools. Ignored if relevant container resolver is not
@@ -414,8 +442,9 @@ galaxy:
#tool_data_path: tool-data
# Directory where Tool Data Table related files will be placed when
- # installed from a ToolShed. Defaults to tool_data_path.
- #shed_tool_data_path: tool-data
+ # installed from a ToolShed. Defaults to the value of the
+ # 'tool_data_path' option.
+ #shed_tool_data_path: null
# Monitor the tool_data and shed_tool_data_path directories. If
# changes in tool data table files are found, the tool data tables for
@@ -428,12 +457,13 @@ galaxy:
# scenarios than the watchdog default.
#watch_tool_data_dir: 'false'
- # File containing old-style genome builds
- #builds_file_path: tool-data/shared/ucsc/builds.txt
+ # File containing old-style genome builds. Value will be resolved with
+  # respect to <tool_data_path>.
+ #builds_file_path: shared/ucsc/builds.txt
# Directory where chrom len files are kept, currently mainly used by
- # trackster
- #len_file_path: tool-data/shared/ucsc/chrom
+  # trackster. Value will be resolved with respect to <tool_data_path>.
+ #len_file_path: shared/ucsc/chrom
# Datatypes config file(s), defines what data (file) types are
# available in Galaxy (.sample is used if default does not exist). If
@@ -453,8 +483,8 @@ galaxy:
# Visualizations config directory: where to look for individual
# visualization plugins. The path is relative to the Galaxy root dir.
- # To use an absolute path begin the path with '/'. This is a comma
- # separated list. Defaults to "config/plugins/visualizations".
+ # To use an absolute path begin the path with '/'. This is a comma-
+ # separated list.
#visualization_plugins_directory: config/plugins/visualizations
# Interactive environment plugins root directory: where to look for
@@ -463,25 +493,9 @@ galaxy:
# stock plugins. These will require Docker to be configured and have
# security considerations, so proceed with caution. The path is
# relative to the Galaxy root dir. To use an absolute path begin the
- # path with '/'. This is a comma separated list.
+ # path with '/'. This is a comma-separated list.
#interactive_environment_plugins_directory: null
- # To run interactive environment containers in Docker Swarm mode (on
- # an existing swarm), set this option to true and set
- # `docker_connect_port` in the IE plugin config (ini) file(s) of any
- # IE plugins you have enabled and ensure that you are not using any
- # `docker run`-specific options in your plugins' `command_inject`
- # options (swarm mode services run using `docker service create`,
- # which has a different and more limited set of options). This option
- # can be overridden on a per-plugin basis by using the `swarm_mode`
- # option in the plugin's ini config file.
- #interactive_environment_swarm_mode: false
-
- # Galaxy can run a "swarm manager" service that will monitor
- # utilization of the swarm and provision/deprovision worker nodes as
- # necessary. The service has its own configuration file.
- #swarm_manager_config_file: config/swarm_manager_conf.yml
-
# Interactive tour directory: where to store interactive tour
# definition files. Galaxy ships with several basic interface tours
# enabled, though a different directory with custom tours can be
@@ -501,15 +515,20 @@ galaxy:
# Each job is given a unique empty directory as its current working
# directory. This option defines in what parent directory those
# directories will be created.
- #job_working_directory: database/jobs_directory
+ # Default value will be resolved to 'database/jobs_directory' where
+  # 'database' is the default value of the 'data_dir' option.
+ #job_working_directory: jobs_directory
# If using a cluster, Galaxy will write job scripts and stdout/stderr
# to this directory.
- #cluster_files_directory: database/pbs
+  # Value will be resolved with respect to <data_dir>.
+ #cluster_files_directory: pbs
# Mako templates are compiled as needed and cached for reuse, this
# directory is used for the cache
- #template_cache_path: database/compiled_templates
+ # Default value will be resolved to 'database/compiled_templates'
+  # where 'database' is the default value of the 'data_dir' option.
+ #template_cache_path: compiled_templates
# Set to false to disable various checks Galaxy will do to ensure it
# can run job scripts before attempting to execute or submit them.
@@ -542,13 +561,17 @@ galaxy:
# from external sources such as https://doi.org/ by Galaxy - the
# following parameters can be used to control the caching used to
# store this information.
- #citation_cache_data_dir: database/citations/data
+ # Default value will be resolved to 'database/citations/data' where
+  # 'database' is the default value of the 'data_dir' option.
+ #citation_cache_data_dir: citations/data
# Citation related caching. Tool citations information maybe fetched
# from external sources such as https://doi.org/ by Galaxy - the
# following parameters can be used to control the caching used to
# store this information.
- #citation_cache_lock_dir: database/citations/lock
+ # Default value will be resolved to 'database/citations/locks' where
+  # 'database' is the default value of the 'data_dir' option.
+ #citation_cache_lock_dir: citations/locks
# Configuration file for the object store If this is set and exists,
# it overrides any other objectstore settings.
@@ -565,17 +588,17 @@ galaxy:
# mail through an SMTP server, which you may define here (host:port).
# Galaxy will automatically try STARTTLS but will continue upon
# failure.
- #smtp_server: ''
+ #smtp_server: null
# If your SMTP server requires a username and password, you can
# provide them here (password in cleartext here, but if your server
# supports STARTTLS it will be sent over the network encrypted).
- #smtp_username: ''
+ #smtp_username: null
# If your SMTP server requires a username and password, you can
# provide them here (password in cleartext here, but if your server
# supports STARTTLS it will be sent over the network encrypted).
- #smtp_password: ''
+ #smtp_password: null
# If your SMTP server requires SSL from the beginning of the
# connection
@@ -585,31 +608,34 @@ galaxy:
# list. This is the address used to subscribe to the list. Uncomment
# and leave empty if you want to remove this option from the user
# registration form.
- #mailing_join_addr: galaxy-announce-join@bx.psu.edu
+ # Example value 'galaxy-announce-join@bx.psu.edu'
+ #mailing_join_addr: null
# Datasets in an error state include a link to report the error.
# Those reports will be sent to this address. Error reports are
# disabled if no address is set. Also this email is shown as a
# contact to user in case of Galaxy misconfiguration and other events
# user may encounter.
- #error_email_to: ''
+ #error_email_to: null
# Email address to use in the 'From' field when sending emails for
# account activations, workflow step notifications and password
# resets. We recommend using string in the following format: Galaxy
# Project If not configured, '' will be used.
- #email_from: ''
+ #email_from: null
# URL of the support resource for the galaxy instance. Used in
# activation emails.
- #instance_resource_url: https://galaxyproject.org/
+ # Example value 'https://galaxyproject.org/'
+ #instance_resource_url: null
# E-mail domains blacklist is used for filtering out users that are
# using disposable email address during the registration. If their
# address domain matches any domain in the blacklist, they are refused
# the registration.
- #blacklist_file: config/disposable_email_blacklist.conf
+ # Example value 'config/disposable_email_blacklist.conf'
+ #blacklist_file: null
# Registration warning message is used to discourage people from
# registering multiple accounts. Applies mostly for the main Galaxy
@@ -645,22 +671,23 @@ galaxy:
# You can enter tracking code here to track visitor's behavior through
# your Google Analytics account. Example: UA-XXXXXXXX-Y
- #ga_code: ''
+ #ga_code: null
# Galaxy can display data at various external browsers. These options
# specify which browsers should be available. URLs and builds
- # available at these browsers are defined in the specified files. If
- # use_remote_user is set to true, display application servers will be
- # denied access to Galaxy and so displaying datasets in these sites
+ # available at these browsers are defined in the specified files.
+ # If use_remote_user is set to true, display application servers will
+ # be denied access to Galaxy and so displaying datasets in these sites
# will fail. display_servers contains a list of hostnames which should
# be allowed to bypass security to display datasets. Please be aware
# that there are security implications if this is allowed. More
# details (including required changes to the proxy server config) are
# available in the Apache proxy documentation on the Galaxy Community
- # Hub. The list of servers in this sample config are for the UCSC
- # Main, Test and Archaea browsers, but the default if left commented
- # is to not allow any display sites to bypass security (you must
- # uncomment the line below to allow them).
+ # Hub.
+ # The list of servers in this sample config are for the UCSC Main,
+ # Test and Archaea browsers, but the default if left commented is to
+ # not allow any display sites to bypass security (you must uncomment
+ # the line below to allow them).
#display_servers: hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw5.cse.ucsc.edu,hgw6.cse.ucsc.edu,hgw7.cse.ucsc.edu,hgw8.cse.ucsc.edu,lowepub.cse.ucsc.edu
# Set this to false to disable the old-style display applications that
@@ -683,14 +710,14 @@ galaxy:
#message_box_visible: false
# Show a message box under the masthead.
- #message_box_content: ''
+ #message_box_content: null
# Class of the message box under the masthead. Possible values are:
# 'info' (the default), 'warning', 'error', 'done'.
#message_box_class: info
# Append "/{brand}" to the "Galaxy" text in the masthead.
- #brand: ''
+ #brand: null
# Format string used when showing date and time information. The
# string may contain: - the directives used by Python time.strftime()
@@ -715,10 +742,11 @@ galaxy:
# URL (with schema http/https) of the Galaxy instance as accessible
# within your local network - if specified used as a default by pulsar
# file staging and Jupyter Docker container for communicating back
- # with Galaxy via the API. If you are attempting to set up GIEs on
- # Mac OS X with Docker Desktop for Mac and your Galaxy instance runs
- # on port 8080 this should be 'http://host.docker.internal:8080'. For
- # more details see https://docs.docker.com/docker-for-mac/networking/
+ # with Galaxy via the API.
+ # If you are attempting to set up GIEs on Mac OS X with Docker Desktop
+ # for Mac and your Galaxy instance runs on port 8080 this should be
+ # 'http://host.docker.internal:8080'. For more details see
+ # https://docs.docker.com/docker-for-mac/networking/
#galaxy_infrastructure_url: http://localhost:8080
# If the above URL cannot be determined ahead of time in dynamic
@@ -738,7 +766,7 @@ galaxy:
#logo_url: /
# The URL linked by the "Galaxy Help" link in the "Help" menu.
- #helpsite_url: ''
+ #helpsite_url: null
# The URL linked by the "Wiki" link in the "Help" menu.
#wiki_url: https://galaxyproject.org/
@@ -765,7 +793,7 @@ galaxy:
# The URL linked by the "Terms and Conditions" link in the "Help"
# menu, as well as on the user registration and login forms and in the
# activation emails.
- #terms_url: ''
+ #terms_url: null
# The URL linked by the "Galaxy Q&A" link in the "Help" menu The
# Galaxy Q&A site is under development; when the site is done, this
@@ -834,7 +862,7 @@ galaxy:
# Redirect. This should be set to the path defined in the nginx
# config as an internal redirect with access to Galaxy's data files
# (see documentation linked above).
- #nginx_x_accel_redirect_base: ''
+ #nginx_x_accel_redirect_base: null
# If using compression in the upstream proxy server, use this option
# to disable gzipping of library .tar.gz and .zip archives, since the
@@ -856,24 +884,24 @@ galaxy:
# in detail in the documentation linked above. The upload store is a
# temporary directory in which files uploaded by the upload module
# will be placed.
- #nginx_upload_store: ''
+ #nginx_upload_store: null
# This value overrides the action set on the file upload form, e.g.
# the web path where the nginx_upload_module has been configured to
# intercept upload requests.
- #nginx_upload_path: ''
+ #nginx_upload_path: null
# Galaxy can also use nginx_upload_module to receive files staged out
# upon job completion by remote job runners (i.e. Pulsar) that
# initiate staging operations on the remote end. See the Galaxy nginx
# documentation for the corresponding nginx configuration.
- #nginx_upload_job_files_store: ''
+ #nginx_upload_job_files_store: null
# Galaxy can also use nginx_upload_module to receive files staged out
# upon job completion by remote job runners (i.e. Pulsar) that
# initiate staging operations on the remote end. See the Galaxy nginx
# documentation for the corresponding nginx configuration.
- #nginx_upload_job_files_path: ''
+ #nginx_upload_job_files_path: null
# Galaxy can upload user files in chunks without using nginx. Enable
# the chunk uploader by specifying a chunk size larger than 0. The
@@ -896,7 +924,9 @@ galaxy:
# The NodeJS dynamic proxy can use an SQLite database or a JSON file
# for IPC, set that here.
- #dynamic_proxy_session_map: database/session_map.sqlite
+ # Default value will be resolved to 'database/session_map.sqlite'
+  # where 'database' is the default value of the 'data_dir' option.
+ #dynamic_proxy_session_map: session_map.sqlite
# Set the port and IP for the dynamic proxy to bind to, this must
# match the external configuration if dynamic_proxy_manage is set to
@@ -940,7 +970,7 @@ galaxy:
# Galaxy instead of a JSON or SQLite file for IPC. If you do not
# specify this, it will be set randomly for you. You should set this
# if you are managing the proxy manually.
- #dynamic_proxy_golang_api_key: ''
+ #dynamic_proxy_golang_api_key: null
# If true, Galaxy will attempt to configure a simple root logger if a
# "loggers" section does not appear in this configuration file.
@@ -968,13 +998,13 @@ galaxy:
# Turn on logging of application events and some user events to the
# database.
- #log_events: true
+ #log_events: false
# Turn on logging of user actions to the database. Actions currently
# logged are grid views, tool searches, and use of "recently" used
# tools menu. The log_events and log_actions functionality will
# eventually be merged.
- #log_actions: true
+ #log_actions: false
# Fluentd configuration. Various events can be logged to the fluentd
# instance configured below by enabling fluent_log.
@@ -1009,12 +1039,12 @@ galaxy:
# Return a Access-Control-Allow-Origin response header that matches
# the Origin header of the request if that Origin hostname matches one
- # of the strings or regular expressions listed here. This is a comma
+ # of the strings or regular expressions listed here. This is a comma-
# separated list of hostname strings or regular expressions beginning
# and ending with /. E.g.
# mysite.com,google.com,usegalaxy.org,/^[\w\.]*example\.com/ See:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
- #allowed_origin_hostnames: ''
+ #allowed_origin_hostnames: null
# Set to true to use Jupyter nbconvert to build HTML from Jupyter
# notebooks in Galaxy histories. This process may allow users to
@@ -1067,14 +1097,15 @@ galaxy:
# Heartbeat log filename. Can accept the template variables
# {server_name} and {pid}
- #heartbeat_log: heartbeat_{server_name}.log
+ # Sample default 'heartbeat_{server_name}.log'
+ #heartbeat_log: null
# Log to Sentry Sentry is an open source logging and error aggregation
# platform. Setting sentry_dsn will enable the Sentry middleware and
# errors will be sent to the indicated sentry instance. This
# connection string is available in your sentry instance under
# -> Settings -> API Keys.
- #sentry_dsn: ''
+ #sentry_dsn: null
# Sentry slow request logging. Requests slower than the threshold
# indicated below will be sent as events to the configured Sentry
@@ -1115,7 +1146,7 @@ galaxy:
# Add an option to the library upload form which allows administrators
# to upload a directory of files.
- #library_import_dir: ''
+ #library_import_dir: null
# Add an option to the library upload form which allows authorized
# non-administrators to upload a directory of files. The configured
@@ -1123,7 +1154,7 @@ galaxy:
# admin user's Galaxy login ( email ). The non-admin user is
# restricted to uploading files or sub-directories of files contained
# in their directory.
- #user_library_import_dir: ''
+ #user_library_import_dir: null
# If user_library_import_dir is set, this option will auto create a
# library import directory for every user (based on their email) upon
@@ -1137,7 +1168,7 @@ galaxy:
# user with library import permissions can import from anywhere in
# these directories (assuming they are able to create symlinks to
# them).
- #user_library_import_symlink_whitelist: ''
+ #user_library_import_symlink_whitelist: null
# In conjunction or alternatively, Galaxy can restrict user library
# imports to those files that the user can read (by checking basic
@@ -1242,15 +1273,15 @@ galaxy:
# User authentication can be delegated to an upstream proxy server
# (usually Apache). The upstream proxy should set a REMOTE_USER
# header in the request. Enabling remote user disables regular logins.
- # For more information, see: https://docs.galaxyproject.org/en/master/
- # admin/special_topics/apache.html
+ # For more information, see:
+ # https://docs.galaxyproject.org/en/master/admin/special_topics/apache.html
#use_remote_user: false
# If use_remote_user is enabled and your external authentication
# method just returns bare usernames, set a default mail domain to be
# appended to usernames, to become your Galaxy usernames (email
# addresses).
- #remote_user_maildomain: ''
+ #remote_user_maildomain: null
# If use_remote_user is enabled, the header that the upstream proxy
# provides the remote username in defaults to HTTP_REMOTE_USER (the
@@ -1270,7 +1301,7 @@ galaxy:
# If use_remote_user is enabled, you can set this to a URL that will
# log your users out.
- #remote_user_logout_href: ''
+ #remote_user_logout_href: null
# If your proxy and/or authentication source does not normalize e-mail
# addresses or user names being passed to Galaxy - set this option to
@@ -1289,7 +1320,7 @@ galaxy:
# Admin section of the server, and will have access to create users,
# groups, roles, libraries, and more. For more information, see:
# https://galaxyproject.org/admin/
- #admin_users: ''
+ #admin_users: null
# Force everyone to log in (disable anonymous access).
#require_login: false
@@ -1330,8 +1361,9 @@ galaxy:
# correct user show up. This makes less sense on large public Galaxy
# instances where that data shouldn't be exposed. For semi-public
# Galaxies, it may make sense to expose just the username and not
- # email, or vice versa. If enable_beta_gdpr is set to true, then this
- # option will be overridden and set to false.
+ # email, or vice versa.
+ # If enable_beta_gdpr is set to true, then this option will be
+ # overridden and set to false.
#expose_user_name: false
# Expose user list. Setting this to true will expose the user list to
@@ -1340,8 +1372,9 @@ galaxy:
# correct user show up. This makes less sense on large public Galaxy
# instances where that data shouldn't be exposed. For semi-public
# Galaxies, it may make sense to expose just the username and not
- # email, or vice versa. If enable_beta_gdpr is set to true, then this
- # option will be overridden and set to false.
+ # email, or vice versa.
+ # If enable_beta_gdpr is set to true, then this option will be
+ # overridden and set to false.
#expose_user_email: false
# Whitelist for local network addresses for "Upload from URL" dialog.
@@ -1349,7 +1382,7 @@ galaxy:
# space, to prevent users making requests to services which the
# administrator did not intend to expose. Previously, you could
# request any network service that Galaxy might have had access to,
- # even if the user could not normally access it. It should be a comma
+ # even if the user could not normally access it. It should be a comma-
# separated list of IP addresses or IP address/mask, e.g.
# 10.10.10.10,10.0.1.0/24,fd00::/8
#fetch_url_whitelist: null
@@ -1359,18 +1392,21 @@ galaxy:
# usernames from logs and bug reports. It also causes the delete user
# admin action to permanently redact their username and password, but
# not to delete data associated with the account as this is not
- # currently easily implementable. You are responsible for removing
- # personal data from backups. This forces expose_user_email and
- # expose_user_name to be false, and forces user_deletion to be true to
- # support the right to erasure. Please read the GDPR section under
- # the special topics area of the admin documentation.
+ # currently easily implementable.
+ # You are responsible for removing personal data from backups.
+ # This forces expose_user_email and expose_user_name to be false, and
+ # forces user_deletion to be true to support the right to erasure.
+ # Please read the GDPR section under the special topics area of the
+ # admin documentation.
#enable_beta_gdpr: false
# Enable the new container interface for Interactive Environments
#enable_beta_containers_interface: false
# Enable beta workflow modules that should not yet be considered part
- # of Galaxy's stable API.
+ # of Galaxy's stable API. (The module state definitions may change and
+ # workflows built using these modules may not function in the
+ # future.)
#enable_beta_workflow_modules: false
# Default format for the export of workflows. Possible values are 'ga'
@@ -1448,19 +1484,21 @@ galaxy:
# Optional list of email addresses of API users who can make calls on
# behalf of other users.
- #api_allow_run_as: ''
+ #api_allow_run_as: null
# Master key that allows many API admin actions to be used without
# actually having a defined admin user in the database/config. Only
# set this if you need to bootstrap Galaxy, you probably do not want
# to set this on public servers.
- #master_api_key: changethis
+ #master_api_key: null
# Enable access to post-authentication options via OpenID.
#enable_openid: false
# If OpenID is enabled, consumer cache directory to use.
- #openid_consumer_cache_path: database/openid_consumer_cache
+ # Default value will be resolved to 'database/openid_consumer_cache'
+  # where 'database' is the default value of the 'data_dir' option.
+ #openid_consumer_cache_path: openid_consumer_cache
# Enable tool tags (associating tools with tags). This has its own
# option since its implementation has a few performance implications
@@ -1486,11 +1524,11 @@ galaxy:
# options. This should point to a directory containing subdirectories
# matching users' identifier (defaults to e-mail), where Galaxy will
# look for files.
- #ftp_upload_dir: ''
+ #ftp_upload_dir: null
# This should be the hostname of your FTP server, which will be
# provided to users in the help text.
- #ftp_upload_site: ''
+ #ftp_upload_site: null
# User attribute to use as subdirectory in calculating default
# ftp_upload_dir pattern. By default this will be email so a user's
@@ -1541,8 +1579,8 @@ galaxy:
# can separate Galaxy into multiple processes. There are more than
# one way to do this, and they are explained in detail in the
# documentation:
- # https://docs.galaxyproject.org/en/master/admin/scaling.html By
- # default, Galaxy manages and executes jobs from within a single
+ # https://docs.galaxyproject.org/en/master/admin/scaling.html
+ # By default, Galaxy manages and executes jobs from within a single
# process and notifies itself of new jobs via in-memory queues. Jobs
# are run locally on the system on which Galaxy is started. Advanced
# job running capabilities can be configured through the job
@@ -1660,20 +1698,24 @@ galaxy:
# (https://docs.galaxyproject.org/en/master/admin/cluster.html
# #submitting-jobs-as-the-real-user) this script is used to run the
# job script Galaxy generates for a tool execution.
- #drmaa_external_runjob_script: sudo -E scripts/drmaa_external_runner.py --assign_all_groups
+ # Example value 'sudo -E scripts/drmaa_external_runner.py
+ # --assign_all_groups'
+ #drmaa_external_runjob_script: null
# When running DRMAA jobs as the Galaxy user
# (https://docs.galaxyproject.org/en/master/admin/cluster.html
# #submitting-jobs-as-the-real-user) this script is used to kill such
# jobs by Galaxy (e.g. if the user cancels the job).
- #drmaa_external_killjob_script: sudo -E scripts/drmaa_external_killer.py
+ # Example value 'sudo -E scripts/drmaa_external_killer.py'
+ #drmaa_external_killjob_script: null
# When running DRMAA jobs as the Galaxy user
# (https://docs.galaxyproject.org/en/master/admin/cluster.html
# #submitting-jobs-as-the-real-user) this script is used transfer
# permissions back and forth between the Galaxy user and the user that
# is running the job.
- #external_chown_script: sudo -E scripts/external_chown_script.py
+ # Example value 'sudo -E scripts/external_chown_script.py'
+ #external_chown_script: null
# When running DRMAA jobs as the Galaxy user
# (https://docs.galaxyproject.org/en/master/admin/cluster.html
@@ -1698,7 +1740,7 @@ galaxy:
# prior to running tools. This can be especially useful for running
# jobs as the actual user, to remove the need to configure each user's
# environment individually.
- #environment_setup_file: ''
+ #environment_setup_file: null
# Optional file containing job resource data entry fields definition.
# These fields will be presented to users in the tool forms and allow
@@ -1771,18 +1813,18 @@ galaxy:
# The base module(s) that are searched for modules for toolbox
# filtering (https://galaxyproject.org/user-defined-toolbox-filters/)
# functions.
- #toolbox_filter_base_modules: galaxy.tools.toolbox.filters,galaxy.tools.filters
+ #toolbox_filter_base_modules: galaxy.tools.filters,galaxy.tools.toolbox.filters
# Galaxy uses AMQP internally for communicating between processes.
# For example, when reloading the toolbox or locking job execution,
# the process that handled that particular request will tell all
- # others to also reload, lock jobs, etc. For connection examples, see
- # http://docs.celeryproject.org/projects/kombu/en/latest/userguide/con
- # nections.html Without specifying anything here, galaxy will first
- # attempt to use your specified database_connection above. If that's
- # not specified either, Galaxy will automatically create and use a
- # separate sqlite database located in your /database folder
- # (indicated in the commented out line below).
+ # others to also reload, lock jobs, etc. For connection examples, see
+ # http://docs.celeryproject.org/projects/kombu/en/latest/userguide/connections.html
+ # Without specifying anything here, galaxy will first attempt to use
+ # your specified database_connection above. If that's not specified
+ # either, Galaxy will automatically create and use a separate sqlite
+ # database located in your /database folder (indicated in the
+ # commented out line below).
#amqp_internal_connection: sqlalchemy+sqlite:///./database/control.sqlite?isolation_level=IMMEDIATE
# Galaxy real time communication server settings
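(Reviewer note on the path defaults above: the sample now lists these locations relative to data_dir instead of hard-coding a 'database/' prefix. A minimal sketch of the resolution rule described in the comments, assuming the default data_dir of 'database'; resolve_against_data_dir is an illustrative helper, not Galaxy's actual API.)

    import os

    def resolve_against_data_dir(value, data_dir="database"):
        # Absolute paths are kept as-is; relative values are joined under data_dir.
        if os.path.isabs(value):
            return value
        return os.path.join(data_dir, value)

    # resolve_against_data_dir("jobs_directory")  -> "database/jobs_directory"
    # resolve_against_data_dir("citations/data")  -> "database/citations/data"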
diff --git a/lib/galaxy/config/sample/reports.yml.sample b/lib/galaxy/config/sample/reports.yml.sample
index 5ca6bdd50081..c3555a89ba08 100644
--- a/lib/galaxy/config/sample/reports.yml.sample
+++ b/lib/galaxy/config/sample/reports.yml.sample
@@ -124,8 +124,9 @@ reports:
# Enables GDPR Compliance mode. This makes several changes to the way
# Galaxy logs and exposes data externally such as removing
- # emails/usernames from logs and bug reports. You are responsible for
- # removing personal data from backups. Please read the GDPR section
- # under the special topics area of the admin documentation.
+ # emails/usernames from logs and bug reports.
+ # You are responsible for removing personal data from backups.
+ # Please read the GDPR section under the special topics area of the
+ # admin documentation.
#enable_beta_gdpr: false
diff --git a/lib/galaxy/config/sample/swarm_manager_conf.yml.sample b/lib/galaxy/config/sample/swarm_manager_conf.yml.sample
deleted file mode 100644
index 31c5632519ce..000000000000
--- a/lib/galaxy/config/sample/swarm_manager_conf.yml.sample
+++ /dev/null
@@ -1,84 +0,0 @@
----
-# Galaxy docker swarm manager configuration file
-#
-# To configure the location of this file, use the `swarm_manager_config_file`
-# setting in galaxy.ini
-
-# When the swarm manager daemonizes, it writes a pid file so that only one
-# manager will run at a time. This is the path to that pid file.
-# {xdg_data_home} will be templated automatically and defaults to
-# ~/.local/share as per the XDG specification
-#pid_file: '{xdg_data_home}/galaxy_swarm_manager.pid'
-
-# Program output will be written to the log
-#log_file: '{xdg_data_home}/galaxy_swarm_manager.log'
-
-# As with GIE plugins, you can modify the base docker command ({docker_args}
-# must be present and will be filled in with the docker subcommand and
-# arguments)
-#command: 'docker {docker_args}'
-
-# Managed services should be started with this string at the beginning of their
-# name. It should match the value of CONTAINER_NAME_PREFIX in
-# lib/galaxy/web/base/interactive_environments.py, so you should not change
-# this unless you change both.
-#service_prefix: galaxy_gie_
-
-# Limits:
-#
-# - max_waiting_services: number of services that should be waiting of each
-# "CPU class" (number of CPUs requested e.g. with --reserve-cpu) before
-# attempting to spawn a node
-# - max_wait_time: number of seconds a service should be waiting before
-# attempting to spawn a node
-# - max_node_idle_time: number of seconds a node should be idle before
-# terminating it
-# - max_node_counts: a dictionary controlling the maximum number of nodes of
-# each CPU class that the swarm manager will attempt to spawn, e.g.:
-# max_node_counts:
-# 1: 10 # spawn up to 10 x 1-CPU nodes
-# 2: 3 # spawn up to 3 x 2-CPU nodes
-# 4: 1 # spawn up to 1 x 4-CPU nodes
-#max_waiting_services: 0
-#max_wait_time: 5
-#max_node_idle_time: 120
-#max_node_counts: {}
-
-# If set, only manage nodes whose swarm hostnames begin with this prefix.
-# Otherwise, attempt to manage all nodes
-#node_prefix: null
-
-# Amount of time to wait for a spawning node to appear in `docker node ls`
-# before considering it failed
-#spawn_wait_time: 30
-
-# Command to run to spawn new nodes. This command should join the node to the
-# swarm. Can include template variables:
-# - {cpu_class}: CPU class as explained above (the value of --reserve-cpu)
-# - {cpus_needed}: Total number of CPUs of the given class needed to run the
-# waiting services
-# If this command does not block until the node is joined to the swarm, make
-# sure it at least completes that step in `spawn_wait_time` once it returns
-# control. This command should return a space-separated list of nodes. If the
-# nodes have a different number of CPUs than the class that they were started
-# for, you can include that number after a colon (e.g. `node1:4`).
-#spawn_command: /bin/true
-
-# Command to run to destroy idle nodes. Can include template variables:
-# - {nodes}: Space-separated list of node names to destroy
-# This command should block until at least the point at which any nodes being
-# deallocated no longer appear in `docker node ls`.
-#destroy_command: /bin/true
-
-# Command to run if either of the above commands failed (e.g. to notify an
-# administrator). Can include template variables:
-# - {failed_command}: Command line of the command that failed
-#command_failure_command: /bin/true
-
-# Number of times to retry spawn/destroy commands before considering them to
-# have failed, and seconds to wait between retries
-#command_retries: 0
-#command_retry_wait: 10
-
-# Stop the swarm manager daemon when there are no services or nodes to manage
-#terminate_when_idle: True
diff --git a/lib/galaxy/config/sample/tool_shed.yml.sample b/lib/galaxy/config/sample/tool_shed.yml.sample
index 59495ea0cd09..ab88a6689ba2 100644
--- a/lib/galaxy/config/sample/tool_shed.yml.sample
+++ b/lib/galaxy/config/sample/tool_shed.yml.sample
@@ -304,9 +304,10 @@ tool_shed:
# Enables GDPR Compliance mode. This makes several changes to the way
# Galaxy logs and exposes data externally such as removing
- # emails/usernames from logs and bug reports. You are responsible for
- # removing personal data from backups. Please read the GDPR section
- # under the special topics area of the admin documentation.
+ # emails/usernames from logs and bug reports.
+ # You are responsible for removing personal data from backups.
+ # Please read the GDPR section under the special topics area of the
+ # admin documentation.
#enable_beta_gdpr: false
# For help on configuring the Advanced proxy features, see:
diff --git a/lib/galaxy/config_watchers.py b/lib/galaxy/config_watchers.py
index 73edf8e5704b..46e9f984eda5 100644
--- a/lib/galaxy/config_watchers.py
+++ b/lib/galaxy/config_watchers.py
@@ -117,9 +117,6 @@ def tool_config_paths(self):
tool_config_paths = []
if hasattr(self.app.config, 'tool_configs'):
tool_config_paths = self.app.config.tool_configs
- if hasattr(self.app.config, 'migrated_tools_config'):
- if self.app.config.migrated_tools_config not in tool_config_paths:
- tool_config_paths.append(self.app.config.migrated_tools_config)
return tool_config_paths
@property
diff --git a/lib/galaxy/datatypes/binary.py b/lib/galaxy/datatypes/binary.py
index 5016ee313f0d..519001ae6b18 100644
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -1378,6 +1378,22 @@ def sniff(self, filename):
except Exception:
return False
+ def sniff_table_names(self, filename, table_names):
+ # All table names should be in the schema
+ try:
+ conn = sqlite.connect(filename)
+ c = conn.cursor()
+ tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
+ result = c.execute(tables_query).fetchall()
+ result = [_[0] for _ in result]
+ for table_name in table_names:
+ if table_name not in result:
+ return False
+ return True
+ except Exception as e:
+ log.warning('%s, sniff Exception: %s', self, e)
+ return False
+
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "SQLite Database"
@@ -1439,20 +1455,9 @@ def set_meta(self, dataset, overwrite=True, **kwd):
def sniff(self, filename):
if super(GeminiSQLite, self).sniff(filename):
- gemini_table_names = ["gene_detailed", "gene_summary", "resources", "sample_genotype_counts", "sample_genotypes", "samples",
- "variant_impacts", "variants", "version"]
- try:
- conn = sqlite.connect(filename)
- c = conn.cursor()
- tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
- result = c.execute(tables_query).fetchall()
- result = [_[0] for _ in result]
- for table_name in gemini_table_names:
- if table_name not in result:
- return False
- return True
- except Exception as e:
- log.warning('%s, sniff Exception: %s', self, e)
+ table_names = ["gene_detailed", "gene_summary", "resources", "sample_genotype_counts",
+ "sample_genotypes", "samples", "variant_impacts", "variants", "version"]
+ return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
@@ -1514,20 +1519,8 @@ def set_meta(self, dataset, overwrite=True, **kwd):
def sniff(self, filename):
if super(CuffDiffSQlite, self).sniff(filename):
# These tables should be in any CuffDiff SQLite output.
- cuffdiff_table_names = ['CDS', 'genes', 'isoforms', 'replicates',
- 'runInfo', 'samples', 'TSS']
- try:
- conn = sqlite.connect(filename)
- c = conn.cursor()
- tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
- result = c.execute(tables_query).fetchall()
- result = [_[0] for _ in result]
- for table_name in cuffdiff_table_names:
- if table_name not in result:
- return False
- return True
- except Exception as e:
- log.warning('%s, sniff Exception: %s', self, e)
+ table_names = ['CDS', 'genes', 'isoforms', 'replicates', 'runInfo', 'samples', 'TSS']
+ return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
@@ -1554,19 +1547,9 @@ def set_meta(self, dataset, overwrite=True, **kwd):
def sniff(self, filename):
if super(MzSQlite, self).sniff(filename):
- mz_table_names = ["DBSequence", "Modification", "Peaks", "Peptide", "PeptideEvidence", "Score", "SearchDatabase", "Source", "SpectraData", "Spectrum", "SpectrumIdentification"]
- try:
- conn = sqlite.connect(filename)
- c = conn.cursor()
- tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
- result = c.execute(tables_query).fetchall()
- result = [_[0] for _ in result]
- for table_name in mz_table_names:
- if table_name not in result:
- return False
- return True
- except Exception as e:
- log.warning('%s, sniff Exception: %s', self, e)
+ table_names = ["DBSequence", "Modification", "Peaks", "Peptide", "PeptideEvidence",
+ "Score", "SearchDatabase", "Source", "SpectraData", "Spectrum", "SpectrumIdentification"]
+ return self.sniff_table_names(filename, table_names)
return False
@@ -1589,19 +1572,90 @@ def set_meta(self, dataset, overwrite=True, **kwd):
def sniff(self, filename):
if super(BlibSQlite, self).sniff(filename):
- blib_table_names = ['IonMobilityTypes', 'LibInfo', 'Modifications', 'RefSpectra', 'RefSpectraPeakAnnotations', 'RefSpectraPeaks', 'ScoreTypes', 'SpectrumSourceFiles']
- try:
- conn = sqlite.connect(filename)
- c = conn.cursor()
- tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
- result = c.execute(tables_query).fetchall()
- result = [_[0] for _ in result]
- for table_name in blib_table_names:
- if table_name not in result:
- return False
- return True
- except Exception as e:
- log.warning('%s, sniff Exception: %s', self, e)
+ table_names = ['IonMobilityTypes', 'LibInfo', 'Modifications', 'RefSpectra',
+ 'RefSpectraPeakAnnotations', 'RefSpectraPeaks', 'ScoreTypes', 'SpectrumSourceFiles']
+ return self.sniff_table_names(filename, table_names)
+ return False
+
+
+class DlibSQlite(SQlite):
+ """
+ Class describing a Proteomics Spectral Library Sqlite database
+ DLIBs only have the "entries", "metadata", and "peptidetoprotein" tables populated.
+ ELIBs have the rest of the tables populated too, such as "peptidequants" or "peptidescores".
+
+ >>> from galaxy.datatypes.sniff import get_test_fname
+ >>> fname = get_test_fname('test.dlib')
+ >>> DlibSQlite().sniff(fname)
+ True
+ >>> fname = get_test_fname('interval.interval')
+ >>> DlibSQlite().sniff(fname)
+ False
+ """
+ MetadataElement(name="dlib_version", default='1.8', param=MetadataParameter, desc="Dlib Version",
+ readonly=True, visible=True, no_value='1.8')
+ file_ext = "dlib"
+
+ def set_meta(self, dataset, overwrite=True, **kwd):
+ super(DlibSQlite, self).set_meta(dataset, overwrite=overwrite, **kwd)
+ try:
+ conn = sqlite.connect(dataset.file_name)
+ c = conn.cursor()
+ tables_query = "SELECT Value FROM metadata WHERE Key = 'version'"
+ version = c.execute(tables_query).fetchall()[0]
+ dataset.metadata.dlib_version = '%s' % (version)
+ except Exception as e:
+ log.warning('%s, set_meta Exception: %s', self, e)
+
+ def sniff(self, filename):
+ if super(DlibSQlite, self).sniff(filename):
+ table_names = ['entries', 'metadata', 'peptidetoprotein']
+ return self.sniff_table_names(filename, table_names)
+ return False
+
+
+class ElibSQlite(SQlite):
+ """
+    Class describing a Proteomics Chromatogram Library Sqlite database
+ DLIBs only have the "entries", "metadata", and "peptidetoprotein" tables populated.
+ ELIBs have the rest of the tables populated too, such as "peptidequants" or "peptidescores".
+
+ >>> from galaxy.datatypes.sniff import get_test_fname
+ >>> fname = get_test_fname('test.elib')
+ >>> ElibSQlite().sniff(fname)
+ True
+ >>> fname = get_test_fname('test.dlib')
+ >>> ElibSQlite().sniff(fname)
+ False
+ """
+ MetadataElement(name="version", default='0.1.14', param=MetadataParameter, desc="Elib Version",
+ readonly=True, visible=True, no_value='0.1.14')
+ file_ext = "elib"
+
+ def set_meta(self, dataset, overwrite=True, **kwd):
+ super(ElibSQlite, self).set_meta(dataset, overwrite=overwrite, **kwd)
+ try:
+ conn = sqlite.connect(dataset.file_name)
+ c = conn.cursor()
+ tables_query = "SELECT Value FROM metadata WHERE Key = 'version'"
+ version = c.execute(tables_query).fetchall()[0]
+            dataset.metadata.version = '%s' % (version)
+ except Exception as e:
+ log.warning('%s, set_meta Exception: %s', self, e)
+
+ def sniff(self, filename):
+ if super(ElibSQlite, self).sniff(filename):
+ table_names = ['entries', 'fragmentquants', 'metadata', 'peptidelocalizations', 'peptidequants',
+ 'peptidescores', 'peptidetoprotein', 'proteinscores', 'retentiontimes']
+ if self.sniff_table_names(filename, table_names):
+ try:
+ conn = sqlite.connect(filename)
+ c = conn.cursor()
+ row_query = "SELECT count(*) FROM peptidescores"
+ count = c.execute(row_query).fetchone()[0]
+ return int(count) > 0
+ except Exception as e:
+ log.warning('%s, sniff Exception: %s', self, e)
return False
@@ -1624,19 +1678,9 @@ def set_meta(self, dataset, overwrite=True, **kwd):
def sniff(self, filename):
if super(IdpDB, self).sniff(filename):
- mz_table_names = ["About", "Analysis", "AnalysisParameter", "PeptideSpectrumMatch", "Spectrum", "SpectrumSource"]
- try:
- conn = sqlite.connect(filename)
- c = conn.cursor()
- tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
- result = c.execute(tables_query).fetchall()
- result = [_[0] for _ in result]
- for table_name in mz_table_names:
- if table_name not in result:
- return False
- return True
- except Exception as e:
- log.warning('%s, sniff Exception: %s', self, e)
+ table_names = ["About", "Analysis", "AnalysisParameter", "PeptideSpectrumMatch",
+ "Spectrum", "SpectrumSource"]
+ return self.sniff_table_names(filename, table_names)
return False
def set_peek(self, dataset, is_multi_byte=False):
@@ -1676,14 +1720,9 @@ def set_meta(self, dataset, overwrite=True, **kwd):
log.warning("%s, set_meta Exception: %s", self, e)
def sniff(self, filename):
- if super(GAFASQLite, self).sniff(filename):
- gafa_table_names = frozenset(['gene', 'gene_family', 'gene_family_member', 'meta', 'transcript'])
- conn = sqlite.connect(filename)
- c = conn.cursor()
- tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
- results = c.execute(tables_query).fetchall()
- found_table_names = frozenset(_[0] for _ in results)
- return gafa_table_names <= found_table_names
+        if super(GAFASQLite, self).sniff(filename):
+ table_names = frozenset(['gene', 'gene_family', 'gene_family_member', 'meta', 'transcript'])
+ return self.sniff_table_names(filename, table_names)
return False
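(Reviewer note: the per-datatype sniffers above now delegate to the shared sniff_table_names helper added to the SQlite base class. A standalone sketch of the same schema check using only the standard library sqlite3 module; the diff itself uses the sqlite alias imported in binary.py.)

    import sqlite3

    def sniff_table_names(filename, table_names):
        # True only if every expected table appears in the database schema.
        try:
            conn = sqlite3.connect(filename)
            rows = conn.cursor().execute(
                "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
            ).fetchall()
            found = {row[0] for row in rows}
            return all(name in found for name in table_names)
        except Exception:
            return False

    # sniff_table_names("test.dlib", ["entries", "metadata", "peptidetoprotein"]) -> True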
diff --git a/lib/galaxy/datatypes/test/test.dlib b/lib/galaxy/datatypes/test/test.dlib
new file mode 100644
index 000000000000..1f2c26e098e3
Binary files /dev/null and b/lib/galaxy/datatypes/test/test.dlib differ
diff --git a/lib/galaxy/datatypes/test/test.elib b/lib/galaxy/datatypes/test/test.elib
new file mode 100644
index 000000000000..b6d6b02a63d8
Binary files /dev/null and b/lib/galaxy/datatypes/test/test.elib differ
diff --git a/lib/galaxy/dependencies/pipfiles/default/pinned-requirements.txt b/lib/galaxy/dependencies/pipfiles/default/pinned-requirements.txt
index cbe54e07ef53..84343d84dad8 100644
--- a/lib/galaxy/dependencies/pipfiles/default/pinned-requirements.txt
+++ b/lib/galaxy/dependencies/pipfiles/default/pinned-requirements.txt
@@ -121,7 +121,7 @@ pbr==5.4.2
prettytable==0.7.2
prov==1.5.1
psutil==5.6.3
-pulsar-galaxy-lib==0.13.0
+pulsar-galaxy-lib==0.14.0.dev0
pyasn1-modules==0.2.6
pyasn1==0.4.6
pycparser==2.19
diff --git a/lib/galaxy/job_execution/datasets.py b/lib/galaxy/job_execution/datasets.py
index 9caa5cff1a51..ee2d583320fd 100644
--- a/lib/galaxy/job_execution/datasets.py
+++ b/lib/galaxy/job_execution/datasets.py
@@ -23,12 +23,18 @@ def __init__(
real_path,
false_path=None,
false_extra_files_path=None,
- mutable=True
+ false_metadata_path=None,
+ mutable=True,
+ dataset_uuid=None,
+ object_store_id=None,
):
self.dataset_id = dataset_id
+ self.dataset_uuid = dataset_uuid
+ self.object_store_id = object_store_id
self.real_path = real_path
self.false_path = false_path
self.false_extra_files_path = false_extra_files_path
+ self.false_metadata_path = false_metadata_path
self.mutable = mutable
def __str__(self):
@@ -37,7 +43,7 @@ def __str__(self):
else:
return self.false_path
- def with_path_for_job(self, false_path, false_extra_files_path=None):
+ def with_path_for_job(self, false_path, false_extra_files_path=None, false_metadata_path=None):
"""
Clone the dataset path but with a new false_path.
"""
@@ -48,6 +54,7 @@ def with_path_for_job(self, false_path, false_extra_files_path=None):
real_path=self.real_path,
false_path=false_path,
false_extra_files_path=false_extra_files_path,
+ false_metadata_path=false_metadata_path,
mutable=self.mutable,
)
return dataset_path
@@ -82,14 +89,18 @@ class OutputsToWorkingDirectoryPathRewriter(object):
is responsible for copying these out after job is complete.
"""
- def __init__(self, working_directory):
+ def __init__(self, working_directory, outputs_directory_name):
self.working_directory = working_directory
+ self.outputs_directory_name = outputs_directory_name
def rewrite_dataset_path(self, dataset, dataset_type):
""" Keep path the same.
"""
if dataset_type == 'output':
- false_path = os.path.abspath(os.path.join(self.working_directory, "galaxy_dataset_%d.dat" % dataset.id))
+ base_output_directory = os.path.abspath(self.working_directory)
+ if self.outputs_directory_name is not None:
+ base_output_directory = os.path.join(base_output_directory, self.outputs_directory_name)
+ false_path = os.path.join(base_output_directory, "galaxy_dataset_%d.dat" % dataset.id)
return false_path
else:
return None
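(Reviewer note: OutputsToWorkingDirectoryPathRewriter now accepts an outputs_directory_name so rewritten output paths land in an "outputs" subdirectory of the job working directory. A minimal sketch of the resulting false paths, mirroring only the behaviour shown in the hunk.)

    import os

    def rewritten_output_path(working_directory, dataset_id, outputs_directory_name="outputs"):
        # 'output' datasets are redirected under <working_directory>/<outputs_directory_name>.
        base = os.path.abspath(working_directory)
        if outputs_directory_name is not None:
            base = os.path.join(base, outputs_directory_name)
        return os.path.join(base, "galaxy_dataset_%d.dat" % dataset_id)

    # rewritten_output_path("/jobs/42", 7) -> "/jobs/42/outputs/galaxy_dataset_7.dat"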
diff --git a/lib/galaxy/jobs/__init__.py b/lib/galaxy/jobs/__init__.py
index 54598d867d85..f66b3d64cedc 100644
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -19,6 +19,7 @@
from json import loads
from xml.etree import ElementTree
+import packaging.version
import six
import yaml
from pulsar.client.staging import COMMAND_VERSION_FILENAME
@@ -168,6 +169,10 @@ def job_config_xml_to_dict(config, root):
environment["metrics"] = metrics_to_dict
params = JobConfiguration.get_params(config, destination)
+ # Handle legacy XML enabling sudo when using docker by default.
+ if "docker_sudo" not in params:
+ params["docker_sudo"] = "true"
+
# TODO: handle enabled/disabled in configure_from
environment['params'] = params
environment['env'] = JobConfiguration.get_envs(destination)
@@ -894,7 +899,8 @@ def _job_dataset_path_rewriter(self):
if self._dataset_path_rewriter is None:
outputs_to_working_directory = util.asbool(self.get_destination_configuration("outputs_to_working_directory", False))
if outputs_to_working_directory:
- self._dataset_path_rewriter = OutputsToWorkingDirectoryPathRewriter(self.working_directory)
+ output_directory = self.outputs_directory
+ self._dataset_path_rewriter = OutputsToWorkingDirectoryPathRewriter(self.working_directory, output_directory)
else:
self._dataset_path_rewriter = NullDatasetPathRewriter()
return self._dataset_path_rewriter
@@ -903,6 +909,17 @@ def _job_dataset_path_rewriter(self):
def dataset_path_rewriter(self):
return self._job_dataset_path_rewriter
+ @property
+ def outputs_directory(self):
+ """Default location of ``outputs_to_working_directory``.
+ """
+ return None if self.created_with_galaxy_version < packaging.version.parse("20.01") else "outputs"
+
+ @property
+ def created_with_galaxy_version(self):
+ galaxy_version = self.get_job().galaxy_version or "19.05"
+ return packaging.version.parse(galaxy_version)
+
@property
def dependency_shell_commands(self):
"""Shell fragment to inject dependencies."""
@@ -1802,12 +1819,21 @@ def get_input_paths(self, job=None):
paths = []
for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
- filenames = self.get_input_dataset_fnames(da.dataset)
- for real_path in filenames:
- false_path = self.dataset_path_rewriter.rewrite_dataset_path(da.dataset, 'input')
- paths.append(DatasetPath(da.id, real_path=real_path, false_path=false_path, mutable=False))
+ paths.append(self.get_input_path(da.dataset))
return paths
+ def get_input_path(self, dataset):
+ real_path = dataset.file_name
+ false_path = self.dataset_path_rewriter.rewrite_dataset_path(dataset, 'input')
+ return DatasetPath(
+ dataset.dataset.id,
+ real_path=real_path,
+ false_path=false_path,
+ mutable=False,
+ dataset_uuid=dataset.dataset.uuid,
+ object_store_id=dataset.dataset.object_store_id,
+ )
+
def get_output_basenames(self):
return [os.path.basename(str(fname)) for fname in self.get_output_fnames()]
@@ -1816,6 +1842,16 @@ def get_output_fnames(self):
self.compute_outputs()
return self.output_paths
+ def get_output_path(self, dataset):
+ if self.output_paths is None:
+ self.compute_outputs()
+ for (hda, dataset_path) in self.output_hdas_and_paths.values():
+ if hda == dataset:
+ return dataset_path
+ if getattr(dataset, "fake_dataset_association", False):
+ return dataset.file_name
+ raise KeyError("Couldn't find job output for [%s] in [%s]" % (dataset, self.output_hdas_and_paths.values()))
+
def get_mutable_output_fnames(self):
if self.output_paths is None:
self.compute_outputs()
@@ -2121,7 +2157,7 @@ def galaxy_system_pwent(self):
def get_output_destination(self, output_path):
"""
Destination for outputs marked as from_work_dir. This is the normal case,
- just copy these files directly to the ulimate destination.
+ just copy these files directly to the ultimate destination.
"""
return output_path
@@ -2344,12 +2380,28 @@ def output_names(self):
""" Output unqualified filenames defined by job. """
@abstractmethod
- def output_paths(self):
- """ Output DatasetPaths defined by job. """
+ def input_path_rewrite(self, dataset):
+ """Input path for specified dataset."""
+
+ @abstractmethod
+ def output_path_rewrite(self, dataset):
+ """Output path for specified dataset."""
+
+ @abstractmethod
+ def input_extra_files_rewrite(self, dataset):
+ """Input extra files path rewrite for specified dataset."""
+
+ @abstractmethod
+ def output_extra_files_rewrite(self, dataset):
+ """Output extra files path rewrite for specified dataset."""
+
+ @abstractmethod
+ def input_metadata_rewrite(self, dataset, metadata_value):
+ """Input metadata path rewrite for specified dataset."""
@abstractmethod
- def input_paths(self):
- """ Input DatasetPaths defined by job. """
+ def unstructured_path_rewrite(self, path):
+ """Rewrite loc file paths, etc.."""
@abstractmethod
def working_directory(self):
@@ -2376,14 +2428,6 @@ def tool_directory(self):
def version_path(self):
""" Location of the version file for the underlying tool. """
- @abstractmethod
- def unstructured_path_rewriter(self):
- """ Return a function that takes in a value, determines if it is path
- to be rewritten (will be passed non-path values as well - onus is on
- this function to determine both if its input is a path and if it should
- be rewritten.)
- """
-
@abstractmethod
def home_directory(self):
"""Home directory of target job - none if HOME should not be set."""
@@ -2396,14 +2440,11 @@ def tmp_directory(self):
class SimpleComputeEnvironment(object):
def config_directory(self):
- return self.working_directory()
+ return os.path.join(self.working_directory(), "configs")
def sep(self):
return os.path.sep
- def unstructured_path_rewriter(self):
- return lambda v: v
-
class SharedComputeEnvironment(SimpleComputeEnvironment):
""" Default ComputeEnviornment for job and task wrapper to pass
@@ -2422,8 +2463,27 @@ def output_names(self):
def output_paths(self):
return self.job_wrapper.get_output_fnames()
- def input_paths(self):
- return self.job_wrapper.get_input_paths(self.job)
+ def input_path_rewrite(self, dataset):
+ return self.job_wrapper.get_input_path(dataset).false_path
+
+ def output_path_rewrite(self, dataset):
+ dataset_path = self.job_wrapper.get_output_path(dataset)
+ if hasattr(dataset_path, "false_path"):
+ return dataset_path.false_path
+ else:
+ return dataset_path
+
+ def input_extra_files_rewrite(self, dataset):
+ return None
+
+ def output_extra_files_rewrite(self, dataset):
+ return None
+
+ def input_metadata_rewrite(self, dataset, metadata_value):
+ return None
+
+ def unstructured_path_rewrite(self, path):
+ return None
def working_directory(self):
return self.job_wrapper.working_directory
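(Reviewer note: the outputs_directory property above gates the new layout on the Galaxy version that created the job. A minimal sketch of that comparison using packaging.version, assuming the "19.05" fallback shown in the hunk for jobs without a recorded version.)

    import packaging.version

    def outputs_directory_for(galaxy_version):
        # Jobs created before 20.01 keep outputs directly in the working directory.
        created_with = packaging.version.parse(galaxy_version or "19.05")
        return None if created_with < packaging.version.parse("20.01") else "outputs"

    # outputs_directory_for("19.09") -> None
    # outputs_directory_for("20.01") -> "outputs"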
diff --git a/lib/galaxy/jobs/command_factory.py b/lib/galaxy/jobs/command_factory.py
index 17211cf1bc1e..6714eba68c17 100644
--- a/lib/galaxy/jobs/command_factory.py
+++ b/lib/galaxy/jobs/command_factory.py
@@ -101,7 +101,7 @@ def build_command(
# Remove the working directory incase this is for instance a SLURM re-submission.
# xref https://github.com/galaxyproject/galaxy/issues/3289
- commands_builder.prepend_command("rm -rf working; mkdir -p working; cd working")
+ commands_builder.prepend_command("rm -rf working outputs; mkdir -p working outputs; cd working")
container_monitor_command = job_wrapper.container_monitor_command(container)
if container_monitor_command:
@@ -158,7 +158,7 @@ def __externalize_commands(job_wrapper, shell, commands_builder, remote_command_
commands = "%s %s" % (shell, join(remote_command_params['script_directory'], script_name))
for_pulsar = True
if not for_pulsar:
- commands += " > ../tool_stdout 2> ../tool_stderr"
+ commands += " > ../outputs/tool_stdout 2> ../outputs/tool_stderr"
log.info("Built script [%s] for tool command [%s]" % (local_container_script, tool_commands))
return commands
diff --git a/lib/galaxy/jobs/runners/__init__.py b/lib/galaxy/jobs/runners/__init__.py
index 5688f3c7a6e5..0abde8793e1d 100644
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -296,7 +296,7 @@ def get_work_dir_outputs(self, job_wrapper, job_working_directory=None, tool_wor
output_paths = {}
for dataset_path in job_wrapper.get_output_fnames():
path = dataset_path.real_path
- if self.app.config.outputs_to_working_directory:
+ if job_wrapper.get_destination_configuration("outputs_to_working_directory", False):
path = dataset_path.false_path
output_paths[dataset_path.dataset_id] = path
@@ -475,8 +475,12 @@ def _finish_or_resubmit_job(self, job_state, job_stdout, job_stderr, job_id=None
job = job_state.job_wrapper.get_job()
exit_code = job_state.read_exit_code()
- tool_stdout_path = os.path.join(job_wrapper.working_directory, "tool_stdout")
- tool_stderr_path = os.path.join(job_wrapper.working_directory, "tool_stderr")
+ outputs_directory = os.path.join(job_wrapper.working_directory, "outputs")
+ if not os.path.exists(outputs_directory):
+ outputs_directory = job_wrapper.working_directory
+
+ tool_stdout_path = os.path.join(outputs_directory, "tool_stdout")
+ tool_stderr_path = os.path.join(outputs_directory, "tool_stderr")
# TODO: These might not exist for running jobs at the upgrade to 19.XX, remove that
# assumption in 20.XX.
if os.path.exists(tool_stdout_path):
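(Reviewer note: the runner now looks for tool_stdout/tool_stderr in the new outputs/ subdirectory and falls back to the working directory for jobs started before the layout change. A minimal sketch of that fallback, covering only what the hunk shows.)

    import os

    def tool_output_paths(working_directory):
        # Prefer <working_directory>/outputs; fall back for jobs predating the new layout.
        outputs_directory = os.path.join(working_directory, "outputs")
        if not os.path.exists(outputs_directory):
            outputs_directory = working_directory
        return (os.path.join(outputs_directory, "tool_stdout"),
                os.path.join(outputs_directory, "tool_stderr"))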
diff --git a/lib/galaxy/jobs/runners/pulsar.py b/lib/galaxy/jobs/runners/pulsar.py
index 73675de90277..252c74aa76c9 100644
--- a/lib/galaxy/jobs/runners/pulsar.py
+++ b/lib/galaxy/jobs/runners/pulsar.py
@@ -16,6 +16,9 @@
import yaml
from pulsar.client import (
build_client_manager,
+ CLIENT_INPUT_PATH_TYPES,
+ ClientInput,
+ ClientInputs,
ClientJobDescription,
ClientOutputs,
finish_job as pulsar_finish_job,
@@ -286,12 +289,37 @@ def queue_job(self, job_wrapper):
try:
dependencies_description = PulsarJobRunner.__dependencies_description(client, job_wrapper)
rewrite_paths = not PulsarJobRunner.__rewrite_parameters(client)
- unstructured_path_rewrites = {}
+ path_rewrites_unstructured = {}
output_names = []
if compute_environment:
- unstructured_path_rewrites = compute_environment.unstructured_path_rewrites
+ path_rewrites_unstructured = compute_environment.path_rewrites_unstructured
output_names = compute_environment.output_names()
+ client_inputs_list = []
+ for input_dataset_wrapper in job_wrapper.get_input_paths():
+ # str here to resolve false_path if set on a DatasetPath object.
+ path = str(input_dataset_wrapper)
+ object_store_ref = {
+ "dataset_id": input_dataset_wrapper.dataset_id,
+ "dataset_uuid": str(input_dataset_wrapper.dataset_uuid),
+ "object_store_id": input_dataset_wrapper.object_store_id,
+ }
+ client_inputs_list.append(ClientInput(path, CLIENT_INPUT_PATH_TYPES.INPUT_PATH, object_store_ref=object_store_ref))
+
+ for input_extra_path in compute_environment.path_rewrites_input_extra.keys():
+                # TODO: track dataset for object_store_ref...
+ client_inputs_list.append(ClientInput(input_extra_path, CLIENT_INPUT_PATH_TYPES.INPUT_EXTRA_FILES_PATH))
+
+ for input_metadata_path in compute_environment.path_rewrites_input_metadata.keys():
+                # TODO: track dataset for object_store_ref...
+ client_inputs_list.append(ClientInput(input_metadata_path, CLIENT_INPUT_PATH_TYPES.INPUT_METADATA_PATH))
+
+ input_files = None
+ client_inputs = ClientInputs(client_inputs_list)
+ else:
+ input_files = self.get_input_files(job_wrapper)
+ client_inputs = None
+
if self.app.config.metadata_strategy == "legacy":
# Drop this branch in 19.09.
metadata_directory = job_wrapper.working_directory
@@ -299,14 +327,16 @@ def queue_job(self, job_wrapper):
metadata_directory = os.path.join(job_wrapper.working_directory, "metadata")
remote_pulsar_app_config = job_destination.params.get("pulsar_app_config", {})
+ job_directory_files = []
config_files = job_wrapper.extra_filenames
tool_script = os.path.join(job_wrapper.working_directory, "tool_script.sh")
if os.path.exists(tool_script):
log.debug("Registering tool_script for Pulsar transfer [%s]" % tool_script)
- config_files.append(tool_script)
+ job_directory_files.append(tool_script)
client_job_description = ClientJobDescription(
command_line=command_line,
- input_files=self.get_input_files(job_wrapper),
+ input_files=input_files,
+ client_inputs=client_inputs, # Only one of these input defs should be non-None
client_outputs=self.__client_outputs(client, job_wrapper),
working_directory=job_wrapper.tool_working_directory,
metadata_directory=metadata_directory,
@@ -315,9 +345,10 @@ def queue_job(self, job_wrapper):
dependencies_description=dependencies_description,
env=client.env,
rewrite_paths=rewrite_paths,
- arbitrary_files=unstructured_path_rewrites,
+ arbitrary_files=path_rewrites_unstructured,
touch_outputs=output_names,
remote_pulsar_app_config=remote_pulsar_app_config,
+ job_directory_files=job_directory_files,
container=None if not remote_container else remote_container.container_id,
)
job_id = pulsar_submit_job(client, client_job_description, remote_job_config)
@@ -378,12 +409,9 @@ def __prepare_job(self, job_wrapper, job_destination):
remote_working_directory = remote_job_config['working_directory']
remote_job_directory = os.path.abspath(os.path.join(remote_working_directory, os.path.pardir))
remote_tool_directory = os.path.abspath(os.path.join(remote_job_directory, "tool_files"))
- # This should be remote_job_directory ideally, this patch using configs is a workaround for
- # older Pulsar versions that didn't support writing stuff to the job directory natively.
- script_directory = os.path.join(remote_job_directory, "configs")
remote_command_params = dict(
working_directory=remote_job_config['metadata_directory'],
- script_directory=script_directory,
+ script_directory=remote_job_directory,
metadata_kwds=metadata_kwds,
dependency_resolution=dependency_resolution,
)
@@ -880,11 +908,13 @@ def __init__(self, pulsar_client, job_wrapper, remote_job_config):
self.pulsar_client = pulsar_client
self.job_wrapper = job_wrapper
self.local_path_config = job_wrapper.default_compute_environment()
- self.unstructured_path_rewrites = {}
+
+ self.path_rewrites_unstructured = {}
+ self.path_rewrites_input_extra = {}
+ self.path_rewrites_input_metadata = {}
+
# job_wrapper.prepare is going to expunge the job backing the following
# computations, so precalculate these paths.
- self._wrapper_input_paths = self.local_path_config.input_paths()
- self._wrapper_output_paths = self.local_path_config.output_paths()
self.path_mapper = PathMapper(pulsar_client, remote_job_config, self.local_path_config.working_directory())
self._config_directory = remote_job_config["configs_directory"]
self._working_directory = remote_job_config["working_directory"]
@@ -900,34 +930,62 @@ def output_names(self):
# Maybe this should use the path mapper, but the path mapper just uses basenames
return self.job_wrapper.get_output_basenames()
- def output_paths(self):
- local_output_paths = self._wrapper_output_paths
-
- results = []
- for local_output_path in local_output_paths:
- wrapper_path = str(local_output_path)
- remote_path = self.path_mapper.remote_output_path_rewrite(wrapper_path)
- results.append(self._dataset_path(local_output_path, remote_path))
- return results
-
- def input_paths(self):
- local_input_paths = self._wrapper_input_paths
-
- results = []
- for local_input_path in local_input_paths:
- wrapper_path = str(local_input_path)
- # This will over-copy in some cases. For instance in the case of task
- # splitting, this input will be copied even though only the work dir
- # input will actually be used.
- remote_path = self.path_mapper.remote_input_path_rewrite(wrapper_path)
- results.append(self._dataset_path(local_input_path, remote_path))
- return results
-
- def _dataset_path(self, local_dataset_path, remote_path):
- remote_extra_files_path = None
- if remote_path:
- remote_extra_files_path = "%s_files" % remote_path[0:-len(".dat")]
- return local_dataset_path.with_path_for_job(remote_path, remote_extra_files_path)
+ def input_path_rewrite(self, dataset):
+ local_input_path_rewrite = self.local_path_config.input_path_rewrite(dataset)
+ if local_input_path_rewrite is not None:
+ local_input_path = local_input_path_rewrite
+ else:
+ local_input_path = dataset.file_name
+ remote_path = self.path_mapper.remote_input_path_rewrite(local_input_path)
+ return remote_path
+
+ def output_path_rewrite(self, dataset):
+ local_output_path_rewrite = self.local_path_config.output_path_rewrite(dataset)
+ if local_output_path_rewrite is not None:
+ local_output_path = local_output_path_rewrite
+ else:
+ local_output_path = dataset.file_name
+ remote_path = self.path_mapper.remote_output_path_rewrite(local_output_path)
+ return remote_path
+
+ def input_extra_files_rewrite(self, dataset):
+ input_path_rewrite = self.input_path_rewrite(dataset)
+ base_input_path = input_path_rewrite[0:-len(".dat")]
+ remote_extra_files_path_rewrite = "%s_files" % base_input_path
+ self.path_rewrites_input_extra[dataset.extra_files_path] = remote_extra_files_path_rewrite
+ return remote_extra_files_path_rewrite
+
+ def output_extra_files_rewrite(self, dataset):
+ output_path_rewrite = self.output_path_rewrite(dataset)
+ base_output_path = output_path_rewrite[0:-len(".dat")]
+ remote_extra_files_path_rewrite = "%s_files" % base_output_path
+ return remote_extra_files_path_rewrite
+
+ def input_metadata_rewrite(self, dataset, metadata_val):
+ # May technically be incorrect to not pass through local_path_config.input_metadata_rewrite
+ # first but that adds untested logic that wouldn't ever be used.
+ remote_input_path = self.path_mapper.remote_input_path_rewrite(metadata_val, client_input_path_type=CLIENT_INPUT_PATH_TYPES.INPUT_METADATA_PATH)
+ if remote_input_path:
+ log.info("input_metadata_rewrite is %s from %s" % (remote_input_path, metadata_val))
+ self.path_rewrites_input_metadata[metadata_val] = remote_input_path
+ return remote_input_path
+
+ # No rewrite...
+ return None
+
+ def unstructured_path_rewrite(self, parameter_value):
+ path_rewrites_unstructured = self.path_rewrites_unstructured
+ if parameter_value in path_rewrites_unstructured:
+ # Path previously mapped, use previous mapping.
+ return path_rewrites_unstructured[parameter_value]
+
+ rewrite, new_unstructured_path_rewrites = self.path_mapper.check_for_arbitrary_rewrite(parameter_value)
+ if rewrite:
+ path_rewrites_unstructured.update(new_unstructured_path_rewrites)
+ return rewrite
+ else:
+ # Did not need to rewrite, use original path or value.
+ return None
def working_directory(self):
return self._working_directory
@@ -944,27 +1002,6 @@ def sep(self):
def version_path(self):
return self._version_path
- def rewriter(self, parameter_value):
- unstructured_path_rewrites = self.unstructured_path_rewrites
- if parameter_value in unstructured_path_rewrites:
- # Path previously mapped, use previous mapping.
- return unstructured_path_rewrites[parameter_value]
- if parameter_value in unstructured_path_rewrites.values():
- # Path is a rewritten remote path (this might never occur,
- # consider dropping check...)
- return parameter_value
-
- rewrite, new_unstructured_path_rewrites = self.path_mapper.check_for_arbitrary_rewrite(parameter_value)
- if rewrite:
- unstructured_path_rewrites.update(new_unstructured_path_rewrites)
- return rewrite
- else:
- # Did need to rewrite, use original path or value.
- return parameter_value
-
- def unstructured_path_rewriter(self):
- return self.rewriter
-
def tool_directory(self):
return self._tool_dir
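
In place of the old precomputed input/output path lists, the Pulsar compute environment now answers per-dataset questions (input_path_rewrite, output_path_rewrite, the *_extra_files_rewrite variants, input_metadata_rewrite and unstructured_path_rewrite) and caches what it hands out so metadata and extra-files paths can be fixed up later. Below is a self-contained sketch of the unstructured-rewrite caching only, with a stub standing in for Pulsar's PathMapper; the stub's mapping rule is purely illustrative.

class StubPathMapper(object):
    """Pretend remote mapping: anything under /data is staged under /remote/data."""

    def check_for_arbitrary_rewrite(self, value):
        if value.startswith("/data/"):
            rewritten = "/remote" + value
            return rewritten, {value: rewritten}
        return None, {}


class UnstructuredRewriteCache(object):
    def __init__(self, path_mapper):
        self.path_mapper = path_mapper
        self.path_rewrites_unstructured = {}

    def unstructured_path_rewrite(self, parameter_value):
        # Reuse a previous mapping if we have one, otherwise ask the path
        # mapper and remember whatever new rewrites it reports.
        if parameter_value in self.path_rewrites_unstructured:
            return self.path_rewrites_unstructured[parameter_value]
        rewrite, new_rewrites = self.path_mapper.check_for_arbitrary_rewrite(parameter_value)
        if rewrite:
            self.path_rewrites_unstructured.update(new_rewrites)
            return rewrite
        return None


cache = UnstructuredRewriteCache(StubPathMapper())
assert cache.unstructured_path_rewrite("/data/ref.fa") == "/remote/data/ref.fa"
assert cache.unstructured_path_rewrite("not-a-path") is None
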
diff --git a/lib/galaxy/managers/jobs.py b/lib/galaxy/managers/jobs.py
index ad2d1bacfeb6..289ef282d050 100644
--- a/lib/galaxy/managers/jobs.py
+++ b/lib/galaxy/managers/jobs.py
@@ -294,28 +294,122 @@ def replace_dataset_ids(path, key, value):
return None
-def fetch_job_states(app, sa_session, job_source_ids, job_source_types):
- decode = app.security.decode_id
+def invocation_job_source_iter(sa_session, invocation_id):
+ # TODO: Handle subworkflows.
+ join = model.WorkflowInvocationStep.table.join(
+ model.WorkflowInvocation
+ )
+ statement = select(
+ [model.WorkflowInvocationStep.job_id, model.WorkflowInvocationStep.implicit_collection_jobs_id, model.WorkflowInvocationStep.state]
+ ).select_from(
+ join
+ ).where(
+ model.WorkflowInvocation.id == invocation_id
+ )
+ for row in sa_session.execute(statement):
+ if row[0]:
+ yield ('Job', row[0], row[2])
+ if row[1]:
+ yield ('ImplicitCollectionJobs', row[1], row[2])
+
+
+def fetch_job_states(sa_session, job_source_ids, job_source_types):
assert len(job_source_ids) == len(job_source_types)
job_ids = set()
implicit_collection_job_ids = set()
+ workflow_invocations_job_sources = {}
+ workflow_invocation_states = {}  # captured before walking step states so we stay conservative about whether scheduling has finished expanding
for job_source_id, job_source_type in zip(job_source_ids, job_source_types):
if job_source_type == "Job":
job_ids.add(job_source_id)
elif job_source_type == "ImplicitCollectionJobs":
implicit_collection_job_ids.add(job_source_id)
+ elif job_source_type == "WorkflowInvocation":
+ invocation_state = sa_session.query(model.WorkflowInvocation).get(job_source_id).state
+ workflow_invocation_states[job_source_id] = invocation_state
+ workflow_invocation_job_sources = []
+ for (invocation_step_source_type, invocation_step_source_id, invocation_step_state) in invocation_job_source_iter(sa_session, job_source_id):
+ workflow_invocation_job_sources.append((invocation_step_source_type, invocation_step_source_id, invocation_step_state))
+ if invocation_step_source_type == "Job":
+ job_ids.add(invocation_step_source_id)
+ elif invocation_step_source_type == "ImplicitCollectionJobs":
+ implicit_collection_job_ids.add(invocation_step_source_id)
+ workflow_invocations_job_sources[job_source_id] = workflow_invocation_job_sources
else:
raise RequestParameterInvalidException("Invalid job source type %s found." % job_source_type)
- # TODO: use above sets and optimize queries on second pass.
+ job_summaries = {}
+ implicit_collection_jobs_summaries = {}
+
+ for job_id in job_ids:
+ job_summaries[job_id] = summarize_jobs_to_dict(sa_session, sa_session.query(model.Job).get(job_id))
+ for implicit_collection_jobs_id in implicit_collection_job_ids:
+ implicit_collection_jobs_summaries[implicit_collection_jobs_id] = summarize_jobs_to_dict(sa_session, sa_session.query(model.ImplicitCollectionJobs).get(implicit_collection_jobs_id))
+
rval = []
for job_source_id, job_source_type in zip(job_source_ids, job_source_types):
if job_source_type == "Job":
- rval.append(summarize_jobs_to_dict(sa_session, sa_session.query(model.Job).get(decode(job_source_id))))
+ rval.append(job_summaries[job_source_id])
+ elif job_source_type == "ImplicitCollectionJobs":
+ rval.append(implicit_collection_jobs_summaries[job_source_id])
else:
- rval.append(summarize_jobs_to_dict(sa_session, sa_session.query(model.ImplicitCollectionJobs).get(decode(job_source_id))))
+ invocation_state = workflow_invocation_states[job_source_id]
+ invocation_job_summaries = []
+ invocation_implicit_collection_job_summaries = []
+ invocation_step_states = []
+ for (invocation_step_source_type, invocation_step_source_id, invocation_step_state) in workflow_invocations_job_sources[job_source_id]:
+ invocation_step_states.append(invocation_step_state)
+ if invocation_step_source_type == "Job":
+ invocation_job_summaries.append(job_summaries[invocation_step_source_id])
+ else:
+ invocation_implicit_collection_job_summaries.append(implicit_collection_jobs_summaries[invocation_step_source_id])
+ rval.append(summarize_invocation_jobs(job_source_id, invocation_job_summaries, invocation_implicit_collection_job_summaries, invocation_state, invocation_step_states))
+
+ return rval
+
+def summarize_invocation_jobs(invocation_id, job_summaries, implicit_collection_job_summaries, invocation_state, invocation_step_states):
+ states = {}
+ if invocation_state == "scheduled":
+ all_scheduled = True
+ for invocation_step_state in invocation_step_states:
+ all_scheduled = all_scheduled and invocation_step_state == "scheduled"
+ if all_scheduled:
+ populated_state = "ok"
+ else:
+ populated_state = "new"
+ elif invocation_state in ["cancelled", "failed"]:
+ populated_state = "failed"
+ else:
+ # invocation states 'new' and 'ready' map to populated_state 'new'
+ populated_state = "new"
+
+ def merge_states(component_states):
+ for key, value in component_states.items():
+ if key not in states:
+ states[key] = value
+ else:
+ states[key] += value
+
+ for job_summary in job_summaries:
+ merge_states(job_summary["states"])
+ for implicit_collection_job_summary in implicit_collection_job_summaries:
+ # 'new' (un-populated collections might not yet have a states entry)
+ if "states" in implicit_collection_job_summary:
+ merge_states(implicit_collection_job_summary["states"])
+ component_populated_state = implicit_collection_job_summary["populated_state"]
+ if component_populated_state == "failed":
+ populated_state = "failed"
+ elif component_populated_state == "new" and populated_state != "failed":
+ populated_state = "new"
+
+ rval = {
+ "id": invocation_id,
+ "model": "WorkflowInvocation",
+ "states": states,
+ "populated_state": populated_state,
+ }
return rval
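
summarize_invocation_jobs builds a single state histogram by merging the 'states' dicts of every component job summary, and demotes populated_state to 'new' or 'failed' when the invocation, a step, or a component collection is not fully scheduled. A toy recreation of just the merge step, using plain dicts in the same shape as the summaries produced by summarize_jobs_to_dict (the example data is made up):

def merge_component_states(component_summaries):
    states = {}
    for summary in component_summaries:
        # un-populated collections may not have a "states" entry yet
        for state, count in summary.get("states", {}).items():
            states[state] = states.get(state, 0) + count
    return states


components = [
    {"states": {"ok": 2}},
    {"states": {"running": 1, "ok": 1}},
    {},  # collection still being populated
]
print(merge_component_states(components))  # {'ok': 3, 'running': 1}
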
diff --git a/lib/galaxy/managers/markdown_util.py b/lib/galaxy/managers/markdown_util.py
index be4144804e83..54ab084d68c3 100644
--- a/lib/galaxy/managers/markdown_util.py
+++ b/lib/galaxy/managers/markdown_util.py
@@ -294,7 +294,8 @@ def _remap_galaxy_markdown_containers(func, markdown):
from_markdown = new_markdown[searching_from:]
match = re.search(GALAXY_FENCED_BLOCK, from_markdown)
if match is not None:
- (replacement, whole_block) = func(match.group(1))
+ replace = match.group(1)
+ (replacement, whole_block) = func(replace)
if whole_block:
start_pos = match.start()
end_pos = match.end()
@@ -305,7 +306,7 @@ def _remap_galaxy_markdown_containers(func, markdown):
end_pos = end_pos + searching_from
new_markdown = new_markdown[:start_pos] + replacement + new_markdown[end_pos:]
- searching_from = end_pos
+ searching_from = start_pos + len(replacement)
else:
break
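
Resuming the scan at start_pos + len(replacement) instead of the old end_pos matters whenever the replacement is shorter than the fenced block it replaces; with the previous arithmetic the next search could start beyond the following block and skip it. A rough length-only illustration with plain strings (not real Galaxy markdown):

text = "AAA[BLOCK-ONE]BBB[BLOCK-TWO]CCC"
replacement = "X"  # much shorter than "[BLOCK-ONE]"

start = text.index("[")
end = text.index("]") + 1
new_text = text[:start] + replacement + text[end:]

# resume right after the inserted replacement rather than at the old end offset
searching_from = start + len(replacement)
assert "[BLOCK-TWO]" in new_text[searching_from:]

# the old offset would already point past the second block
assert "[BLOCK-TWO]" not in new_text[end:]
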
diff --git a/lib/galaxy/managers/users.py b/lib/galaxy/managers/users.py
index 7ca6d1d1aaaa..5d1b5ae92838 100644
--- a/lib/galaxy/managers/users.py
+++ b/lib/galaxy/managers/users.py
@@ -20,7 +20,11 @@
base,
deletable
)
-from galaxy.security.validate_user_input import validate_email, validate_password, validate_publicname
+from galaxy.security.validate_user_input import (
+ validate_email,
+ validate_password,
+ validate_publicname
+)
from galaxy.util.hash_util import new_secure_hash
from galaxy.web import url_for
@@ -490,10 +494,7 @@ def send_reset_email(self, trans, payload={}, **kwd):
return "Failed to produce password reset token. User not found."
def get_reset_token(self, trans, email):
- reset_user = trans.sa_session.query(self.app.model.User).filter(self.app.model.User.table.c.email == email).first()
- if not reset_user:
- # Perform a case-insensitive check only if the user wasn't found
- reset_user = trans.sa_session.query(self.app.model.User).filter(func.lower(self.app.model.User.table.c.email) == func.lower(email)).first()
+ reset_user = trans.sa_session.query(self.app.model.User).filter(func.lower(self.app.model.User.table.c.email) == email.lower()).first()
if reset_user:
prt = self.app.model.PasswordResetToken(reset_user)
trans.sa_session.add(prt)
diff --git a/lib/galaxy/managers/workflows.py b/lib/galaxy/managers/workflows.py
index 8b871719502b..42290a266118 100644
--- a/lib/galaxy/managers/workflows.py
+++ b/lib/galaxy/managers/workflows.py
@@ -67,7 +67,7 @@ def get_stored_workflow(self, trans, workflow_id, by_stored_id=True):
# see if they have passed in the UUID for a workflow that is attached to a stored workflow
workflow_uuid = uuid.UUID(workflow_id)
workflow_query = trans.sa_session.query(trans.app.model.StoredWorkflow).filter(and_(
- trans.app.model.StoredWorkflow.latest_workflow_id == trans.app.model.Workflow.id,
+ trans.app.model.StoredWorkflow.id == trans.app.model.Workflow.stored_workflow_id,
trans.app.model.Workflow.uuid == workflow_uuid
))
elif by_stored_id:
@@ -77,7 +77,7 @@ def get_stored_workflow(self, trans, workflow_id, by_stored_id=True):
else:
workflow_id = decode_id(self.app, workflow_id)
workflow_query = trans.sa_session.query(trans.app.model.StoredWorkflow).filter(and_(
- trans.app.model.StoredWorkflow.latest_workflow_id == trans.app.model.Workflow.id,
+ trans.app.model.StoredWorkflow.id == trans.app.model.Workflow.stored_workflow_id,
trans.app.model.Workflow.id == workflow_id
))
stored_workflow = workflow_query.options(joinedload('annotations'),
@@ -194,7 +194,7 @@ def update_invocation_step(self, trans, decoded_workflow_invocation_step_id, act
trans.sa_session.flush()
return workflow_invocation_step
- def build_invocations_query(self, trans, stored_workflow_id=None, history_id=None, user_id=None):
+ def build_invocations_query(self, trans, stored_workflow_id=None, history_id=None, user_id=None, include_terminal=True):
"""Get invocations owned by the current user."""
sa_session = trans.sa_session
invocations_query = sa_session.query(model.WorkflowInvocation)
@@ -220,6 +220,11 @@ def build_invocations_query(self, trans, stored_workflow_id=None, history_id=Non
model.WorkflowInvocation.table.c.history_id == history_id
)
+ if not include_terminal:
+ invocations_query = invocations_query.filter(
+ model.WorkflowInvocation.table.c.state.in_(model.WorkflowInvocation.non_terminal_states)
+ )
+
return [inv for inv in invocations_query if self.check_security(trans,
inv,
check_ownership=True,
diff --git a/lib/galaxy/model/__init__.py b/lib/galaxy/model/__init__.py
index b93c268b3515..b5b00d8603d9 100644
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -4484,14 +4484,15 @@ def __init__(self):
class StoredWorkflow(HasTags, Dictifiable, RepresentById):
- dict_collection_visible_keys = ['id', 'name', 'published', 'deleted']
- dict_element_visible_keys = ['id', 'name', 'published', 'deleted']
+ dict_collection_visible_keys = ['id', 'name', 'create_time', 'published', 'deleted']
+ dict_element_visible_keys = ['id', 'name', 'create_time', 'published', 'deleted']
def __init__(self):
self.id = None
self.user = None
self.name = None
self.slug = None
+ self.create_time = None
self.published = False
self.latest_workflow_id = None
self.workflows = []
@@ -4893,8 +4894,8 @@ def __init__(self):
class WorkflowInvocation(UsesCreateAndUpdateTime, Dictifiable, RepresentById):
- dict_collection_visible_keys = ['id', 'update_time', 'workflow_id', 'history_id', 'uuid', 'state']
- dict_element_visible_keys = ['id', 'update_time', 'workflow_id', 'history_id', 'uuid', 'state']
+ dict_collection_visible_keys = ['id', 'update_time', 'create_time', 'workflow_id', 'history_id', 'uuid', 'state']
+ dict_element_visible_keys = ['id', 'update_time', 'create_time', 'workflow_id', 'history_id', 'uuid', 'state']
states = Bunch(
NEW='new', # Brand new workflow invocation... maybe this should be same as READY
READY='ready', # Workflow ready for another iteration of scheduling.
@@ -4902,6 +4903,7 @@ class WorkflowInvocation(UsesCreateAndUpdateTime, Dictifiable, RepresentById):
CANCELLED='cancelled',
FAILED='failed',
)
+ non_terminal_states = [states.NEW, states.READY]
def __init__(self):
self.subworkflow_invocations = []
@@ -5071,7 +5073,7 @@ def output_associations(self):
for output_dataset_assoc in self.output_datasets:
outputs.append(output_dataset_assoc)
for output_dataset_collection_assoc in self.output_dataset_collections:
- outputs.append(output_dataset_collection_assoc.dataset_collection)
+ outputs.append(output_dataset_collection_assoc)
return outputs
@property
diff --git a/lib/galaxy/queue_worker.py b/lib/galaxy/queue_worker.py
index 4713bc241568..4f646e4db0e9 100644
--- a/lib/galaxy/queue_worker.py
+++ b/lib/galaxy/queue_worker.py
@@ -178,8 +178,6 @@ def _get_new_toolbox(app):
if hasattr(app, 'tool_shed_repository_cache'):
app.tool_shed_repository_cache.rebuild()
tool_configs = app.config.tool_configs
- if app.config.migrated_tools_config not in tool_configs:
- tool_configs.append(app.config.migrated_tools_config)
new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
new_toolbox.data_manager_tools = app.toolbox.data_manager_tools
diff --git a/lib/galaxy/security/validate_user_input.py b/lib/galaxy/security/validate_user_input.py
index 71d541956934..ae5dc840910e 100644
--- a/lib/galaxy/security/validate_user_input.py
+++ b/lib/galaxy/security/validate_user_input.py
@@ -7,6 +7,8 @@
import logging
import re
+from sqlalchemy import func
+
log = logging.getLogger(__name__)
# Email validity parameters
@@ -35,8 +37,8 @@ def validate_email(trans, email, user=None, check_dup=True, allow_empty=False):
message = "The format of the email address is not correct."
elif len(email) > EMAIL_MAX_LEN:
message = "Email address cannot be more than %d characters in length." % EMAIL_MAX_LEN
- elif check_dup and trans.sa_session.query(trans.app.model.User).filter_by(email=email).first():
- message = "User with that email already exists."
+ elif check_dup and trans.sa_session.query(trans.app.model.User).filter(func.lower(trans.app.model.User.table.c.email) == email.lower()).first():
+ message = "User with email '%s' already exists." % email
# If the blacklist is not empty filter out the disposable domains.
elif trans.app.config.blacklist_content is not None:
domain = email.split('@')[1]
@@ -48,8 +50,10 @@ def validate_email(trans, email, user=None, check_dup=True, allow_empty=False):
def validate_publicname(trans, publicname, user=None):
- # User names must be at least three characters in length and contain only lower-case
- # letters, numbers, and the '-' character.
+ """
+ Check that publicname respects the minimum and maximum string length, the
+ allowed characters, and that the username is not taken already.
+ """
if user and user.username == publicname:
return ''
if len(publicname) < PUBLICNAME_MIN_LEN:
@@ -63,24 +67,23 @@ def validate_publicname(trans, publicname, user=None):
return ''
-def transform_publicname(trans, publicname, user=None):
- # User names must be at least four characters in length and contain only lower-case
- # letters, numbers, and the '-' character.
+def transform_publicname(publicname):
+ """
+ Transform publicname to respect the minimum and maximum string length, and
+ the allowed characters.
+ FILL_CHAR is used to extend or replace characters.
+ """
# TODO: Enhance to allow generation of semi-random publicnames e.g., when valid but taken
- if user and user.username == publicname:
- return publicname
- elif publicname not in ['None', None, '']:
+ if publicname not in ['None', None, '']:
publicname = publicname.lower()
publicname = re.sub(VALID_PUBLICNAME_SUB, FILL_CHAR, publicname)
publicname = publicname.ljust(PUBLICNAME_MIN_LEN + 1, FILL_CHAR)[:PUBLICNAME_MAX_LEN]
- if not trans.sa_session.query(trans.app.model.User).filter_by(username=publicname).first():
- return publicname
- return ''
+ return publicname
def validate_password(trans, password, confirm):
if len(password) < PASSWORD_MIN_LEN:
return "Use a password of at least %d characters." % PASSWORD_MIN_LEN
- elif password != confirm:
+ if password != confirm:
return "Passwords do not match."
return ""
diff --git a/lib/galaxy/selenium/navigates_galaxy.py b/lib/galaxy/selenium/navigates_galaxy.py
index a38b2a4fd80d..5b285f2bed73 100644
--- a/lib/galaxy/selenium/navigates_galaxy.py
+++ b/lib/galaxy/selenium/navigates_galaxy.py
@@ -427,20 +427,16 @@ def register(self, email=None, password=None, username=None, confirm=None, asser
confirm=confirm
))
self.wait_for_and_click(self.navigation.registration.selectors.submit)
- # Give the browser a bit of time to submit the request.
- # It would be good to eliminate this sleep, but it can't be because Galaxy
- # doesn't swap the "User" menu automatically after it registers a user and
- # and the donemessage visible comment below doesn't work when using Selenium.
- # Something about the Selenium session or quickness of registering causes the
- # following in the Galaxy logs which gets propaged to the GUI as a generic error:
- # /api/histories/cfc05ccec54895e2/contents?keys=type_id%2Celement_count&order=hid&v=dev&q=history_content_type&q=deleted&q=purged&q=visible&qv=dataset_collection&qv=False&qv=False&qv=True HTTP/1.1" 403 - "http://localhost:8080/"
- # Like the logged in user doesn't have permission to the previously anonymous user's
- # history, it is odd but I cannot replicate this outside of Selenium.
- time.sleep(1.35)
-
- if assert_valid:
- # self.wait_for_selector_visible(".donemessage")
+ if assert_valid is False:
+ self.assert_error_message()
+ elif assert_valid:
+ self.wait_for_logged_in()
+
+ # The code below used to be needed because a bug prevented the masthead from updating,
+ # but that bug appears to be fixed, so we could consider eliminating these extra checks to speed
+ # up tests.
self.home()
+ self.wait_for_logged_in()
self.click_masthead_user()
# Make sure the user menu was dropped down
user_menu = self.components.masthead.user_menu.wait_for_visible()
@@ -461,15 +457,20 @@ def register(self, email=None, password=None, username=None, confirm=None, asser
def wait_for_logged_in(self):
try:
- self.wait_for_visible(self.navigation.masthead.selectors.logged_in_only)
+ self.components.masthead.logged_in_only.wait_for_visible()
except self.TimeoutException as e:
+ ui_logged_out = self.components.masthead.logged_out_only.is_displayed
+ if ui_logged_out:
+ dom_message = "Element a.loggedout-only is present in DOM, indicating Login or Register button still in masthead."
+ else:
+ dom_message = "Element a.loggedout-only is *not* present in DOM."
user_info = self.api_get("users/current")
if "username" in user_info:
- template = "Failed waiting for masthead to update for login, but user API response indicates [%s] is logged in. This seems to be a bug in Galaxy. API response was [%s]. "
- message = template % (user_info["username"], user_info)
+ template = "Failed waiting for masthead to update for login, but user API response indicates [%s] is logged in. This seems to be a bug in Galaxy. %s logged API response was [%s]. "
+ message = template % (user_info["username"], dom_message, user_info)
raise self.prepend_timeout_message(e, message)
else:
- raise NotLoggedInException(e, user_info)
+ raise NotLoggedInException(e, user_info, dom_message)
def click_center(self):
action_chains = self.action_chains()
@@ -940,8 +941,8 @@ def workflow_index_open(self):
self.click_masthead_workflow()
def workflow_index_table_elements(self):
- self.wait_for_selector_visible("tbody.workflow-search")
- table_elements = self.driver.find_elements_by_css_selector("tbody.workflow-search > tr:not([style*='display: none'])")
+ self.wait_for_selector_visible("#workflow-table")
+ table_elements = self.driver.find_elements_by_css_selector("#workflow-table > tbody > tr:not([style*='display: none'])")
return table_elements
def workflow_index_table_row(self, workflow_index=0):
@@ -954,11 +955,11 @@ def workflow_index_column_text(self, column_index, workflow_index=0):
return columns[column_index].text
def workflow_index_click_search(self):
- return self.wait_for_and_click_selector("input.search-wf")
+ return self.wait_for_and_click_selector("#workflow-search")
def workflow_index_search_for(self, search_term=None):
return self._inline_search_for(
- "input.search-wf",
+ "#workflow-search",
search_term,
)
@@ -975,19 +976,17 @@ def workflow_index_rename(self, new_name, workflow_index=0):
def workflow_index_name(self, workflow_index=0):
"""Get workflow name for workflow_index'th row."""
row_element = self.workflow_index_table_row(workflow_index=workflow_index)
- workflow_button = row_element.find_element_by_css_selector("a.btn.btn-secondary")
+ workflow_button = row_element.find_element_by_css_selector(".workflow-dropdown")
return workflow_button.text
- def workflow_index_click_option(self, option_title, workflow_index=0):
-
- @retry_during_transitions
- def click_option():
- workflow_row = self.workflow_index_table_row(workflow_index=workflow_index)
- workflow_button = workflow_row.find_element_by_css_selector("button.dropdown-toggle")
- workflow_button.click()
-
- click_option()
+ @retry_during_transitions
+ def workflow_click_option(self, workflow_selector, workflow_index=0):
+ workflow_row = self.workflow_index_table_row(workflow_index=workflow_index)
+ workflow_button = workflow_row.find_element_by_css_selector(workflow_selector)
+ workflow_button.click()
+ def workflow_index_click_option(self, option_title, workflow_index=0):
+ self.workflow_click_option(".workflow-dropdown", workflow_index)
menu_element = self.wait_for_selector_visible(".dropdown-menu.show")
menu_options = menu_element.find_elements_by_css_selector("a.dropdown-item")
found_option = False
@@ -1276,10 +1275,18 @@ def collection_builder_click_paired_item(self, forward_or_reverse, item):
def logout_if_needed(self):
if self.is_logged_in():
self.home()
- self.click_masthead_user()
- self.wait_for_and_click(self.navigation.masthead.labels.logout)
- self.sleep_for(WAIT_TYPES.UX_TRANSITION)
- assert not self.is_logged_in()
+ self.logout()
+
+ def logout(self):
+ self.components.masthead.logged_in_only.wait_for_visible()
+ self.click_masthead_user()
+ self.components.masthead.logout.wait_for_and_click()
+ try:
+ self.components.masthead.logged_out_only.wait_for_visible()
+ except self.TimeoutException as e:
+ message = "Clicked logout button but waiting for 'Login or Registration' button failed, perhaps the logout button was clicked before the handler was setup?"
+ raise self.prepend_timeout_message(e, message)
+ assert not self.is_logged_in(), "Clicked to log out and the UI reflects a logout, but the API still thinks a user is logged in."
def run_tour(self, path, skip_steps=None, sleep_on_steps=None, tour_callback=None):
skip_steps = skip_steps or []
@@ -1468,9 +1475,9 @@ def snapshot(self, description):
class NotLoggedInException(TimeoutException):
- def __init__(self, timeout_exception, user_info):
- template = "Waiting for UI to reflect user logged in but it did not occur. API indicates no user is currently logged in. API response was [%s]. %s"
- msg = template % (user_info, timeout_exception.msg)
+ def __init__(self, timeout_exception, user_info, dom_message):
+ template = "Waiting for UI to reflect user logged in but it did not occur. API indicates no user is currently logged in. %s API response was [%s]. %s"
+ msg = template % (dom_message, user_info, timeout_exception.msg)
super(NotLoggedInException, self).__init__(
msg=msg,
screen=timeout_exception.screen,
diff --git a/lib/galaxy/selenium/navigation.yml b/lib/galaxy/selenium/navigation.yml
index eae430b7eed3..5d5d9a1fcf01 100644
--- a/lib/galaxy/selenium/navigation.yml
+++ b/lib/galaxy/selenium/navigation.yml
@@ -45,6 +45,7 @@ masthead:
selector: '//a[contains(text(), "Logged in as")]'
logged_in_only: 'a.loggedin-only'
+ logged_out_only: 'a.loggedout-only'
labels:
# top-level menus
@@ -249,8 +250,8 @@ tool_form:
workflows:
selectors:
- new_button: '#new-workflow'
- import_button: '#import-workflow'
+ new_button: '#workflow-create'
+ import_button: '#workflow-import'
workflow_run:
diff --git a/lib/galaxy/tool_util/deps/__init__.py b/lib/galaxy/tool_util/deps/__init__.py
index af255a818de6..b8f452c11468 100644
--- a/lib/galaxy/tool_util/deps/__init__.py
+++ b/lib/galaxy/tool_util/deps/__init__.py
@@ -123,7 +123,7 @@ def __init__(self, default_base_path, conf_file=None, app_config={}):
plugin_source = None
dependency_resolver_dicts = app_config.get("dependency_resolvers")
if dependency_resolver_dicts is not None:
- plugin_source = ('dict', dependency_resolver_dicts)
+ plugin_source = plugin_config.PluginConfigSource('dict', dependency_resolver_dicts)
else:
plugin_source = self.__build_dependency_resolvers_plugin_source(conf_file)
self.dependency_resolvers = self.__parse_resolver_conf_plugins(plugin_source)
@@ -298,7 +298,7 @@ def __build_dependency_resolvers_plugin_source(self, conf_file):
return plugin_source
def __default_dependency_resolvers_source(self):
- return ('dict', [
+ return plugin_config.PluginConfigSource('dict', [
{"type": "tool_shed_packages"},
{"type": "galaxy_packages"},
{"type": "conda"},
diff --git a/lib/galaxy/tool_util/deps/container_classes.py b/lib/galaxy/tool_util/deps/container_classes.py
index 090573715023..2c8489b2cbf5 100644
--- a/lib/galaxy/tool_util/deps/container_classes.py
+++ b/lib/galaxy/tool_util/deps/container_classes.py
@@ -72,7 +72,7 @@ def __init__(self, container_id, app_info, tool_info, destination_info, job_info
self.container_info = {}
def prop(self, name, default):
- destination_name = "docker_%s" % name
+ destination_name = "%s_%s" % (self.container_type, name)
return self.destination_info.get(destination_name, default)
@property
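
With the destination prefix derived from container_type, the same prop() lookup now serves Docker and Singularity destinations alike, e.g. a singularity_sudo destination option is consulted for Singularity containers where previously only docker_-prefixed keys were read. A tiny sketch of the lookup (the destination dict contents are illustrative):

def prop(destination_info, container_type, name, default):
    # mirrors Container.prop(): the key is "<container_type>_<name>"
    return destination_info.get("%s_%s" % (container_type, name), default)


destination = {"docker_sudo": "false", "singularity_sudo": "true"}
print(prop(destination, "docker", "sudo", "false"))       # 'false'
print(prop(destination, "singularity", "sudo", "false"))  # 'true'
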
diff --git a/lib/galaxy/tool_util/deps/docker_util.py b/lib/galaxy/tool_util/deps/docker_util.py
index 9647ca7ecc95..e5ef9cb7587f 100644
--- a/lib/galaxy/tool_util/deps/docker_util.py
+++ b/lib/galaxy/tool_util/deps/docker_util.py
@@ -9,7 +9,7 @@
from .commands import argv_to_str
DEFAULT_DOCKER_COMMAND = "docker"
-DEFAULT_SUDO = True
+DEFAULT_SUDO = False
DEFAULT_SUDO_COMMAND = "sudo"
DEFAULT_HOST = None
DEFAULT_VOLUME_MOUNT_TYPE = "rw"
diff --git a/lib/galaxy/tools/data/__init__.py b/lib/galaxy/tools/data/__init__.py
index 0149a444b1c7..35224d0b5371 100644
--- a/lib/galaxy/tools/data/__init__.py
+++ b/lib/galaxy/tools/data/__init__.py
@@ -157,7 +157,7 @@ def add_new_entries_from_config_file(self, config_filename, tool_data_path, shed
from_shed_config=True)
except Exception as e:
error_message = 'Error attempting to parse file %s: %s' % (str(os.path.split(config_filename)[1]), util.unicodify(e))
- log.debug(error_message)
+ log.debug(error_message, exc_info=True)
table_elems = []
if persist:
# Persist Galaxy's version of the changed tool_data_table_conf.xml file.
@@ -683,7 +683,7 @@ def filter_file_fields(self, loc_file, values):
if fields != values:
rval += line
- with open(loc_file, 'wb') as writer:
+ with open(loc_file, 'w') as writer:
writer.write(rval)
return rval
diff --git a/lib/galaxy/tools/error_reports/__init__.py b/lib/galaxy/tools/error_reports/__init__.py
index 8fc4e4473986..acbd9de403c3 100644
--- a/lib/galaxy/tools/error_reports/__init__.py
+++ b/lib/galaxy/tools/error_reports/__init__.py
@@ -8,6 +8,19 @@
log = logging.getLogger(__name__)
+DEFAULT_CONFIG = [
+ {
+ 'type': 'email',
+ 'verbose': True,
+ 'user_submission': True,
+ },
+ {
+ 'type': 'sentry',
+ 'user_submission': False,
+ },
+]
+DEFAULT_PLUGINS_SOURCE = plugin_config.PluginConfigSource('dict', DEFAULT_CONFIG)
+
class ErrorReports(object):
"""Load and store a collection of :class:`ErrorPlugin` objects."""
@@ -26,7 +39,8 @@ def __plugins_dict(self):
class NullErrorPlugin(object):
def submit_report(self, dataset, job, tool, **kwargs):
- return "Submitted Bug Report"
+ log.warning("Bug report for dataset %s, job %s submitted to NullErrorPlugin", dataset, job)
+ return [("Error reporting is not configured for this Galaxy instance", "danger")]
NULL_ERROR_PLUGIN = NullErrorPlugin()
@@ -68,7 +82,9 @@ def __plugins_from_source(self, plugins_source):
@staticmethod
def from_file(plugin_classes, conf_file, **kwargs):
- if not conf_file or not os.path.exists(conf_file):
+ plugins_source = DEFAULT_PLUGINS_SOURCE
+ if conf_file and os.path.exists(conf_file):
+ plugins_source = plugin_config.plugin_source_from_path(conf_file)
+ if not plugins_source.source:
return NULL_ERROR_PLUGIN
- plugins_source = plugin_config.plugin_source_from_path(conf_file)
return ErrorPlugin(plugin_classes, plugins_source, **kwargs)
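
DEFAULT_PLUGINS_SOURCE reuses the PluginConfigSource namedtuple introduced in galaxy.util.plugin_config (see the final hunk of this patch), so the 'dict' plugin source is an explicit, named pair rather than a bare tuple. A minimal sketch of that container:

import collections

PluginConfigSource = collections.namedtuple('PluginConfigSource', ['type', 'source'])

default_source = PluginConfigSource('dict', [
    {'type': 'email', 'verbose': True, 'user_submission': True},
    {'type': 'sentry', 'user_submission': False},
])
print(default_source.type)               # 'dict'
print(default_source.source[0]['type'])  # 'email'
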
diff --git a/lib/galaxy/tools/error_reports/plugins/gitlab.py b/lib/galaxy/tools/error_reports/plugins/gitlab.py
index 1f36148ee39d..473d6adea937 100644
--- a/lib/galaxy/tools/error_reports/plugins/gitlab.py
+++ b/lib/galaxy/tools/error_reports/plugins/gitlab.py
@@ -91,6 +91,10 @@ def submit_report(self, dataset, job, tool, **kwargs):
# Find the repo inside the ToolShed
ts_repourl = self._get_gitrepo_from_ts(job, ts_url)
+ # Remove .git from the repository URL if this was specified
+ if ts_repourl.endswith(".git"):
+ ts_repourl = ts_repourl[:-4]
+
log.info("GitLab error reporting - Determine ToolShed Repository URL: %s", ts_repourl)
# Determine the GitLab project URL and the issue cache key
@@ -117,15 +121,34 @@ def submit_report(self, dataset, job, tool, **kwargs):
error_message = self._generate_error_message(dataset, job, kwargs)
# Determine the user to assign to the issue
- gl_username = gl_project.commits.list()[0].attributes['author_name']
- if gl_username not in self.git_username_id_cache:
- self.git_username_id_cache[gl_username] = gitlab.users.list(username=gl_username)[0].get_id()
- gl_userid = self.git_username_id_cache[gl_username]
+ gl_userid = None
+ if len(gl_project.commits.list()) > 0:
+ gl_username = gl_project.commits.list()[0].attributes['author_name']
+ if not self.redact_user_details_in_bugreport:
+ log.debug("GitLab error reporting - Last commiter username: %s" % gl_username)
+ if gl_username not in self.git_username_id_cache:
+ log.debug("GitLab error reporting - Last Committer user ID: %d" %
+ self.gitlab.users.list(username=gl_username)[0].get_id())
+ self.git_username_id_cache[gl_username] = self.gitlab.users.list(username=gl_username)[
+ 0].get_id()
+ gl_userid = self.git_username_id_cache.get(gl_username, None)
log.info(error_title in self.issue_cache[issue_cache_key])
if error_title not in self.issue_cache[issue_cache_key]:
- # Create a new issue.
- self._create_issue(issue_cache_key, error_title, error_message, gl_project, gl_userid=gl_userid)
+ try:
+ # Create a new issue.
+ self._create_issue(issue_cache_key, error_title, error_message, gl_project, gl_userid=gl_userid)
+ except gitlab.GitlabOwnershipError:
+ gitlab_projecturl = "/".join([self.git_default_repo_owner, self.git_default_repo_name])
+ gitlab_urlencodedpath = urllib.quote_plus(gitlab_projecturl)
+ # Make sure we are always logged in, then retrieve the GitLab project if it isn't cached.
+ self.gitlab.auth()
+ if gitlab_projecturl not in self.git_project_cache:
+ self.git_project_cache[gitlab_projecturl] = self.gitlab.projects.get(gitlab_urlencodedpath)
+ gl_project = self.git_project_cache[gitlab_projecturl]
+
+ # Submit issue to default project
+ self._create_issue(issue_cache_key, error_title, error_message, gl_project, gl_userid=gl_userid)
else:
# Add a comment to an issue...
self._append_issue(issue_cache_key, error_title, error_message, gitlab_urlencodedpath=gitlab_urlencodedpath)
@@ -179,7 +202,7 @@ def _create_issue(self, issue_cache_key, error_title, error_message, project, **
# Assign the user to the issue
gl_userid = kwargs.get("gl_userid", None)
- if not gl_userid:
+ if gl_userid is not None:
issue_data['assignee_ids'] = [gl_userid]
# Create the issue on GitLab
diff --git a/lib/galaxy/tools/evaluation.py b/lib/galaxy/tools/evaluation.py
index 028238b9ee4f..6134bc2574b2 100644
--- a/lib/galaxy/tools/evaluation.py
+++ b/lib/galaxy/tools/evaluation.py
@@ -7,7 +7,6 @@
from six import string_types
from galaxy import model
-from galaxy.job_execution.datasets import dataset_path_rewrites
from galaxy.model.none_like import NoneDataset
from galaxy.tools import global_tool_errors
from galaxy.tools.parameters import (
@@ -36,6 +35,7 @@
)
from galaxy.util import (
find_instance_nested,
+ safe_makedirs,
unicodify,
)
from galaxy.util.bunch import Bunch
@@ -63,7 +63,6 @@ def set_compute_environment(self, compute_environment, get_special=None):
for evaluating command and config cheetah templates.
"""
self.compute_environment = compute_environment
- self.unstructured_path_rewriter = compute_environment.unstructured_path_rewriter()
job = self.job
incoming = dict([(p.name, p.value) for p in job.parameters])
@@ -87,6 +86,8 @@ def validate_inputs(input, value, context, **kwargs):
# fake dataset association that provides the needed attributes for
# preparing a job.
class FakeDatasetAssociation (object):
+ fake_dataset_association = True
+
def __init__(self, dataset=None):
self.dataset = dataset
self.file_name = dataset.file_name
@@ -105,9 +106,6 @@ def __init__(self, dataset=None):
inp_data,
out_data,
output_collections=out_collections,
- output_paths=compute_environment.output_paths(),
- job_working_directory=compute_environment.working_directory(),
- input_paths=compute_environment.input_paths()
)
# Certain tools require tasks to be completed prior to job execution
@@ -119,7 +117,7 @@ def __init__(self, dataset=None):
self.param_dict = param_dict
- def build_param_dict(self, incoming, input_datasets, output_datasets, output_collections, output_paths, job_working_directory, input_paths=[]):
+ def build_param_dict(self, incoming, input_datasets, output_datasets, output_collections):
"""
Build the dictionary of parameters for substituting into the command
line. Each value is wrapped in a `InputValueWrapper`, which allows
@@ -127,6 +125,9 @@ def build_param_dict(self, incoming, input_datasets, output_datasets, output_col
when the __str__ method is called it actually calls the
`to_param_dict_string` method of the associated input.
"""
+ compute_environment = self.compute_environment
+ job_working_directory = compute_environment.working_directory()
+
param_dict = dict()
def input():
@@ -139,11 +140,10 @@ def input():
# All parameters go into the param_dict
param_dict.update(incoming)
- input_dataset_paths = dataset_path_rewrites(input_paths)
- self.__populate_wrappers(param_dict, input_datasets, input_dataset_paths, job_working_directory)
- self.__populate_input_dataset_wrappers(param_dict, input_datasets, input_dataset_paths)
- self.__populate_output_dataset_wrappers(param_dict, output_datasets, output_paths, job_working_directory)
- self.__populate_output_collection_wrappers(param_dict, output_collections, output_paths, job_working_directory)
+ self.__populate_wrappers(param_dict, input_datasets, job_working_directory)
+ self.__populate_input_dataset_wrappers(param_dict, input_datasets)
+ self.__populate_output_dataset_wrappers(param_dict, output_datasets, job_working_directory)
+ self.__populate_output_collection_wrappers(param_dict, output_collections, job_working_directory)
self.__populate_unstructured_path_rewrites(param_dict)
# Call param dict sanitizer, before non-job params are added, as we don't want to sanitize filenames.
self.__sanitize_param_dict(param_dict)
@@ -178,7 +178,7 @@ def do_walk(inputs, input_values):
do_walk(inputs, input_values)
- def __populate_wrappers(self, param_dict, input_datasets, input_dataset_paths, job_working_directory):
+ def __populate_wrappers(self, param_dict, input_datasets, job_working_directory):
def wrap_input(input_values, input):
value = input_values[input.name]
@@ -187,7 +187,7 @@ def wrap_input(input_values, input):
input_values[input.name] = \
DatasetListWrapper(job_working_directory,
dataset_instances,
- dataset_paths=input_dataset_paths,
+ compute_environment=self.compute_environment,
datatypes_registry=self.app.datatypes_registry,
tool=self.tool,
name=input.name)
@@ -228,13 +228,9 @@ def wrap_input(input_values, input):
wrapper_kwds = dict(
datatypes_registry=self.app.datatypes_registry,
tool=self,
- name=input.name
+ name=input.name,
+ compute_environment=self.compute_environment
)
- if dataset:
- # A None dataset does not have a filename
- real_path = dataset.file_name
- if real_path in input_dataset_paths:
- wrapper_kwds["dataset_path"] = input_dataset_paths[real_path]
element_identifier = element_identifier_mapper.identifier(dataset, param_dict)
if element_identifier:
wrapper_kwds["identifier"] = element_identifier
@@ -244,7 +240,7 @@ def wrap_input(input_values, input):
dataset_collection = value
wrapper_kwds = dict(
datatypes_registry=self.app.datatypes_registry,
- dataset_paths=input_dataset_paths,
+ compute_environment=self.compute_environment,
tool=self,
name=input.name
)
@@ -256,7 +252,7 @@ def wrap_input(input_values, input):
input_values[input.name] = wrapper
elif isinstance(input, SelectToolParameter):
input_values[input.name] = SelectToolParameterWrapper(
- input, value, other_values=param_dict, path_rewriter=self.unstructured_path_rewriter)
+ input, value, other_values=param_dict, compute_environment=self.compute_environment)
else:
input_values[input.name] = InputValueWrapper(
input, value, param_dict)
@@ -268,7 +264,7 @@ def wrap_input(input_values, input):
element_identifier_mapper = ElementIdentifierMapper(input_datasets)
self.__walk_inputs(self.tool.inputs, param_dict, wrap_input)
- def __populate_input_dataset_wrappers(self, param_dict, input_datasets, input_dataset_paths):
+ def __populate_input_dataset_wrappers(self, param_dict, input_datasets):
# TODO: Update this method for dataset collections? Need to test. -John.
# FIXME: when self.check_values==True, input datasets are being wrapped
@@ -298,16 +294,11 @@ def __populate_input_dataset_wrappers(self, param_dict, input_datasets, input_da
datatypes_registry=self.app.datatypes_registry,
tool=self,
name=name,
+ compute_environment=self.compute_environment,
)
- if data:
- real_path = data.file_name
- if real_path in input_dataset_paths:
- dataset_path = input_dataset_paths[real_path]
- wrapper_kwds['dataset_path'] = dataset_path
param_dict[name] = DatasetFilenameWrapper(data, **wrapper_kwds)
- def __populate_output_collection_wrappers(self, param_dict, output_collections, output_paths, job_working_directory):
- output_dataset_paths = dataset_path_rewrites(output_paths)
+ def __populate_output_collection_wrappers(self, param_dict, output_collections, job_working_directory):
tool = self.tool
for name, out_collection in output_collections.items():
if name not in tool.output_collections:
@@ -318,7 +309,7 @@ def __populate_output_collection_wrappers(self, param_dict, output_collections,
wrapper_kwds = dict(
datatypes_registry=self.app.datatypes_registry,
- dataset_paths=output_dataset_paths,
+ compute_environment=self.compute_environment,
tool=tool,
name=name
)
@@ -336,23 +327,20 @@ def __populate_output_collection_wrappers(self, param_dict, output_collections,
param_dict[output_def.name] = dataset_wrapper
log.info("Updating param_dict for %s with %s" % (output_def.name, dataset_wrapper))
- def __populate_output_dataset_wrappers(self, param_dict, output_datasets, output_paths, job_working_directory):
- output_dataset_paths = dataset_path_rewrites(output_paths)
+ def __populate_output_dataset_wrappers(self, param_dict, output_datasets, job_working_directory):
for name, hda in output_datasets.items():
# Write outputs to the working directory (for security purposes)
# if desired.
- real_path = hda.file_name
- if real_path in output_dataset_paths:
- dataset_path = output_dataset_paths[real_path]
- param_dict[name] = DatasetFilenameWrapper(hda, dataset_path=dataset_path)
- try:
- open(dataset_path.false_path, 'w').close()
- except EnvironmentError:
- pass # May well not exist - e.g. Pulsar.
- else:
- param_dict[name] = DatasetFilenameWrapper(hda)
+ param_dict[name] = DatasetFilenameWrapper(hda, compute_environment=self.compute_environment, io_type="output")
+ try:
+ open(str(param_dict[name]), 'w').close()
+ except EnvironmentError:
+ pass # May well not exist - e.g. Pulsar.
+
# Provide access to a path to store additional files
- # TODO: path munging for cluster/dataset server relocatability
+ # TODO: move compute path logic into compute environment, move setting files_path
+ # logic into DatasetFilenameWrapper. Currently this sits in the middle and glues
+ # stuff together inconsistently with the way the rest of path rewriting works.
store_by = getattr(hda.dataset.object_store, "store_by", "id")
file_name = "dataset_%s_files" % getattr(hda.dataset, store_by)
param_dict[name].files_path = os.path.abspath(os.path.join(job_working_directory, file_name))
@@ -399,9 +387,9 @@ def __populate_unstructured_path_rewrites(self, param_dict):
def rewrite_unstructured_paths(input_values, input):
if isinstance(input, SelectToolParameter):
input_values[input.name] = SelectToolParameterWrapper(
- input, input_values[input.name], other_values=param_dict, path_rewriter=self.unstructured_path_rewriter)
+ input, input_values[input.name], other_values=param_dict, compute_environment=self.compute_environment)
- if not self.tool.check_values and self.unstructured_path_rewriter:
+ if not self.tool.check_values and self.compute_environment:
# The tools weren't "wrapped" yet, but need to be in order to get
# the paths rewritten.
self.__walk_inputs(self.tool.inputs, param_dict, rewrite_unstructured_paths)
@@ -525,7 +513,9 @@ def __build_config_files(self):
for name, filename, content in self.tool.config_files:
config_text, is_template = self.__build_config_file_text(content)
# If a particular filename was forced by the config use it
- directory = self.local_working_directory
+ directory = os.path.join(self.local_working_directory, "configs")
+ if not os.path.exists(directory):
+ os.makedirs(directory)
if filename is not None:
config_filename = os.path.join(directory, filename)
else:
@@ -601,6 +591,9 @@ def __build_config_file_text(self, content):
return json.dumps(wrapped_json.json_wrap(self.tool.inputs, self.param_dict, handle_files=handle_files)), False
def __write_workdir_file(self, config_filename, content, context, is_template=True, strip=False):
+ parent_dir = os.path.dirname(config_filename)
+ if not os.path.exists(parent_dir):
+ safe_makedirs(parent_dir)
if is_template:
value = fill_template(content, context=context, python_template_version=self.tool.python_template_version)
else:
diff --git a/lib/galaxy/tools/parameters/wrapped_json.py b/lib/galaxy/tools/parameters/wrapped_json.py
index 73d019cdedca..b8a44e1ae9f2 100644
--- a/lib/galaxy/tools/parameters/wrapped_json.py
+++ b/lib/galaxy/tools/parameters/wrapped_json.py
@@ -32,23 +32,23 @@ def _data_input_to_path(v):
repeat_job_value = []
for d in value:
repeat_instance_job_value = {}
- json_wrap(input.inputs, d, repeat_instance_job_value)
+ json_wrap(input.inputs, d, repeat_instance_job_value, handle_files=handle_files)
repeat_job_value.append(repeat_instance_job_value)
json_value = repeat_job_value
elif input_type == "conditional":
values = value
current = values["__current_case__"]
conditional_job_value = {}
- json_wrap(input.cases[current].inputs, values, conditional_job_value)
+ json_wrap(input.cases[current].inputs, values, conditional_job_value, handle_files=handle_files)
test_param = input.test_param
test_param_name = test_param.name
- test_value = _json_wrap_input(test_param, values[test_param_name])
+ test_value = _json_wrap_input(test_param, values[test_param_name], handle_files=handle_files)
conditional_job_value[test_param_name] = test_value
json_value = conditional_job_value
elif input_type == "section":
values = value
section_job_value = {}
- json_wrap(input.inputs, values, section_job_value)
+ json_wrap(input.inputs, values, section_job_value, handle_files=handle_files)
json_value = section_job_value
elif input_type == "data" and input.multiple:
if handle_files == "paths":
diff --git a/lib/galaxy/tools/toolbox/base.py b/lib/galaxy/tools/toolbox/base.py
index e41ac9f426ad..d87b39aefc5b 100644
--- a/lib/galaxy/tools/toolbox/base.py
+++ b/lib/galaxy/tools/toolbox/base.py
@@ -15,7 +15,10 @@
from six.moves.urllib.parse import urlparse
from galaxy.exceptions import MessageException, ObjectNotFound
-from galaxy.tool_util.deps import build_dependency_manager
+from galaxy.tool_util.deps import (
+ build_dependency_manager,
+ NullDependencyManager
+)
from galaxy.tool_util.loader_directory import looks_like_a_tool
from galaxy.util import (
ExecutionTimer,
@@ -165,19 +168,19 @@ def _init_tools_from_config(self, config_filename):
try:
tool_conf_source = get_toolbox_parser(config_filename)
except (OSError, IOError) as exc:
- for opt in ('shed_tool_conf', 'migrated_tools_config'):
- if (config_filename == getattr(self.app.config, opt) and not
- getattr(self.app.config, opt + '_set') and
- exc.errno == errno.ENOENT):
- log.debug("Skipping loading missing default config file: %s", config_filename)
- stcd = dict(config_filename=config_filename,
- tool_path=self.app.config.shed_tools_dir,
- config_elems=[],
- create=SHED_TOOL_CONF_XML.format(shed_tools_dir=self.app.config.shed_tools_dir))
- self._dynamic_tool_confs.append(stcd)
- return
+ dynamic_confs = (self.app.config.shed_tool_config_file, self.app.config.migrated_tools_config)
+ if config_filename in dynamic_confs and exc.errno == errno.ENOENT:
+ log.info("Shed-enabled tool configuration file does not exist, but will be created on demand: %s",
+ config_filename)
+ stcd = dict(config_filename=config_filename,
+ tool_path=self.app.config.shed_tools_dir,
+ config_elems=[],
+ create=SHED_TOOL_CONF_XML.format(shed_tools_dir=self.app.config.shed_tools_dir))
+ self._dynamic_tool_confs.append(stcd)
+ return
raise
tool_path = tool_conf_source.parse_tool_path()
+ log.debug("Tool path for tool configuration %s is %s", config_filename, tool_path)
parsing_shed_tool_conf = tool_conf_source.is_shed_tool_conf()
if parsing_shed_tool_conf:
# Keep an in-memory list of xml elements to enable persistence of the changing tool config.
@@ -1158,6 +1161,10 @@ def _looks_like_a_tool(self, path):
return looks_like_a_tool(path, enable_beta_formats=getattr(self.app.config, "enable_beta_tool_formats", False))
def _init_dependency_manager(self):
+ use_tool_dependency_resolution = getattr(self.app, "use_tool_dependency_resolution", True)
+ if not use_tool_dependency_resolution:
+ self.dependency_manager = NullDependencyManager()
+ return
app_config_dict = self.app.config.config_dict
conf_file = app_config_dict.get("dependency_resolvers_config_file")
default_tool_dependency_dir = os.path.join(self.app.config.data_dir, "dependencies")
diff --git a/lib/galaxy/tools/toolbox/watcher.py b/lib/galaxy/tools/toolbox/watcher.py
index 7f2d7fce88ff..77b897d21b46 100644
--- a/lib/galaxy/tools/toolbox/watcher.py
+++ b/lib/galaxy/tools/toolbox/watcher.py
@@ -115,7 +115,12 @@ def check(self):
else:
continue
new_mod_time = os.path.getmtime(path)
- if new_mod_time > mod_time:
+ # mod_time can be None if a non-required config was just created
+ if not mod_time:
+ self.paths[path] = new_mod_time
+ log.debug("The file '%s' has been created.", path)
+ do_reload = True
+ elif new_mod_time > mod_time:
new_hash = md5_hash_file(path)
if hashes[path] != new_hash:
self.paths[path] = new_mod_time
@@ -160,22 +165,16 @@ def __init__(self, observer_class, even_handler_class, toolbox):
self.toolbox = toolbox
self.tool_file_ids = {}
self.tool_dir_callbacks = {}
- self.monitored_dirs = {}
-
- def monitor(self, dir):
- self.observer.schedule(self.event_handler, dir, recursive=False)
def watch_file(self, tool_file, tool_id):
tool_file = os.path.abspath(tool_file)
self.tool_file_ids[tool_file] = tool_id
tool_dir = os.path.dirname(tool_file)
if tool_dir not in self.monitored_dirs:
- self.monitored_dirs[tool_dir] = tool_dir
self.monitor(tool_dir)
def watch_directory(self, tool_dir, callback):
tool_dir = os.path.abspath(tool_dir)
self.tool_dir_callbacks[tool_dir] = callback
if tool_dir not in self.monitored_dirs:
- self.monitored_dirs[tool_dir] = tool_dir
self.monitor(tool_dir)
diff --git a/lib/galaxy/tools/wrappers.py b/lib/galaxy/tools/wrappers.py
index b5d9753fb034..4e24a28803a7 100644
--- a/lib/galaxy/tools/wrappers.py
+++ b/lib/galaxy/tools/wrappers.py
@@ -19,12 +19,6 @@
PATH_ATTRIBUTES = ["path"]
-# ... by default though - don't rewrite anything (if no ComputeEnviornment
-# defined or ComputeEnvironment doesn't supply a rewriter).
-def DEFAULT_PATH_REWRITER(x):
- return x
-
-
class ToolParameterValueWrapper(object):
"""
Base class for object that Wraps a Tool Parameter and Value.
@@ -138,29 +132,38 @@ class SelectToolParameterFieldWrapper(object):
Only applicable for dynamic_options selects, which have more than simple 'options' defined (name, value, selected).
"""
- def __init__(self, input, value, other_values, path_rewriter):
+ def __init__(self, input, value, other_values, compute_environment):
self._input = input
self._value = value
self._other_values = other_values
self._fields = {}
- self._path_rewriter = path_rewriter
+ self._compute_environment = compute_environment
def __getattr__(self, name):
if name not in self._fields:
self._fields[name] = self._input.options.get_field_by_name_for_value(name, self._value, None, self._other_values)
values = map(str, self._fields[name])
- if name in PATH_ATTRIBUTES:
+ if name in PATH_ATTRIBUTES and self._compute_environment:
# If we infer this is a path, rewrite it if needed.
- values = map(self._path_rewriter, values)
+ new_values = []
+ for value in values:
+ rewrite_value = self._compute_environment.unstructured_path_rewrite(value)
+ if rewrite_value:
+ new_values.append(rewrite_value)
+ else:
+ new_values.append(value)
+
+ values = new_values
+
return self._input.separator.join(values)
- def __init__(self, input, value, other_values={}, path_rewriter=None):
+ def __init__(self, input, value, other_values={}, compute_environment=None):
self.input = input
self.value = value
self.input.value_label = input.value_to_display_text(value)
self._other_values = other_values
- self._path_rewriter = path_rewriter or DEFAULT_PATH_REWRITER
- self.fields = self.SelectToolParameterFieldWrapper(input, value, other_values, self._path_rewriter)
+ self.compute_environment = compute_environment
+ self.fields = self.SelectToolParameterFieldWrapper(input, value, other_values, self.compute_environment)
def __eq__(self, other):
if isinstance(other, string_types):
@@ -201,15 +204,24 @@ class MetadataWrapper(object):
of a Metadata Collection.
"""
- def __init__(self, metadata):
- self.metadata = metadata
+ def __init__(self, dataset, compute_environment=None):
+ self.dataset = dataset
+ self.metadata = dataset.metadata
+ self.compute_environment = compute_environment
def __getattr__(self, name):
rval = self.metadata.get(name, None)
if name in self.metadata.spec:
if rval is None:
rval = self.metadata.spec[name].no_value
- rval = self.metadata.spec[name].param.to_safe_string(rval)
+ metadata_param = self.metadata.spec[name].param
+ from galaxy.model.metadata import FileParameter
+ rval = metadata_param.to_safe_string(rval)
+ if isinstance(metadata_param, FileParameter) and self.compute_environment:
+ rewrite = self.compute_environment.input_metadata_rewrite(self.dataset, rval)
+ if rewrite is not None:
+ rval = rewrite
+
# Store this value, so we don't need to recalculate if needed
# again
setattr(self, name, rval)
@@ -234,7 +246,7 @@ def get(self, key, default=None):
def items(self):
return iter((k, self.get(k)) for k, v in self.metadata.items())
- def __init__(self, dataset, datatypes_registry=None, tool=None, name=None, dataset_path=None, identifier=None):
+ def __init__(self, dataset, datatypes_registry=None, tool=None, name=None, compute_environment=None, identifier=None, io_type="input"):
if not dataset:
try:
# TODO: allow this to work when working with grouping
@@ -248,15 +260,28 @@ def __init__(self, dataset, datatypes_registry=None, tool=None, name=None, datas
# Should we name this .value to maintain consistency with most other ToolParameterValueWrapper?
self.unsanitized = dataset
self.dataset = wrap_with_safe_string(dataset, no_wrap_classes=ToolParameterValueWrapper)
- self.metadata = self.MetadataWrapper(dataset.metadata)
+ self.metadata = self.MetadataWrapper(dataset, compute_environment)
if hasattr(dataset, 'tags'):
self.groups = {tag.user_value.lower() for tag in dataset.tags if tag.user_tname == 'group'}
else:
# May be a 'FakeDatasetAssociation'
self.groups = set()
+ self.compute_environment = compute_environment
+ # TODO: lazy initialize this...
+ self.__io_type = io_type
+ if self.__io_type == "input":
+ path_rewrite = compute_environment and dataset and compute_environment.input_path_rewrite(dataset)
+ if path_rewrite:
+ self.false_path = path_rewrite
+ else:
+ self.false_path = None
+ else:
+ path_rewrite = compute_environment and compute_environment.output_path_rewrite(dataset)
+ if path_rewrite:
+ self.false_path = path_rewrite
+ else:
+ self.false_path = None
self.datatypes_registry = datatypes_registry
- self.false_path = getattr(dataset_path, "false_path", None)
- self.false_extra_files_path = getattr(dataset_path, "false_extra_files_path", None)
self._element_identifier = identifier
@property
@@ -290,26 +315,30 @@ def __getattr__(self, key):
if self.false_path is not None and key == 'file_name':
# Path to dataset was rewritten for this job.
return self.false_path
- elif self.false_extra_files_path is not None and key == 'extra_files_path':
- # Path to extra files was rewritten for this job.
- return self.false_extra_files_path
elif key == 'extra_files_path':
- try:
- # Assume it is an output and that this wrapper
- # will be set with correct "files_path" for this
- # job.
- return self.files_path
- except AttributeError:
- # Otherwise, we have an input - delegate to model and
- # object store to find the static location of this
- # directory.
+ if self.__io_type == "input":
+ path_rewrite = self.compute_environment and self.compute_environment.input_extra_files_rewrite(self.unsanitized)
+ else:
+ path_rewrite = self.compute_environment and self.compute_environment.output_extra_files_rewrite(self.unsanitized)
+ if path_rewrite:
+ return path_rewrite
+ else:
try:
- return self.unsanitized.extra_files_path
- except exceptions.ObjectNotFound:
- # NestedObjectstore raises an error here
- # instead of just returning a non-existent
- # path like DiskObjectStore.
- raise
+ # Assume it is an output and that this wrapper
+ # will be set with correct "files_path" for this
+ # job.
+ return self.files_path
+ except AttributeError:
+ # Otherwise, we have an input - delegate to model and
+ # object store to find the static location of this
+ # directory.
+ try:
+ return self.unsanitized.extra_files_path
+ except exceptions.ObjectNotFound:
+ # NestedObjectstore raises an error here
+ # instead of just returning a non-existent
+ # path like DiskObjectStore.
+ raise
else:
return getattr(self.dataset, key)
@@ -320,13 +349,8 @@ def __bool__(self):
class HasDatasets(object):
- def _dataset_wrapper(self, dataset, dataset_paths, **kwargs):
- wrapper_kwds = kwargs.copy()
- if dataset and dataset_paths:
- real_path = dataset.file_name
- if real_path in dataset_paths:
- wrapper_kwds["dataset_path"] = dataset_paths[real_path]
- return DatasetFilenameWrapper(dataset, **wrapper_kwds)
+ def _dataset_wrapper(self, dataset, **kwargs):
+ return DatasetFilenameWrapper(dataset, **kwargs)
def paths_as_file(self, sep="\n"):
contents = sep.join(map(str, self))
@@ -340,7 +364,7 @@ class DatasetListWrapper(list, ToolParameterValueWrapper, HasDatasets):
"""
"""
- def __init__(self, job_working_directory, datasets, dataset_paths=[], **kwargs):
+ def __init__(self, job_working_directory, datasets, **kwargs):
self._dataset_elements_cache = {}
if not isinstance(datasets, list):
datasets = [datasets]
@@ -350,7 +374,7 @@ def to_wrapper(dataset):
element = dataset
dataset = element.dataset_instance
kwargs["identifier"] = element.element_identifier
- return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
+ return self._dataset_wrapper(dataset, **kwargs)
list.__init__(self, map(to_wrapper, datasets))
self.job_working_directory = job_working_directory
@@ -392,11 +416,10 @@ def __bool__(self):
class DatasetCollectionWrapper(ToolParameterValueWrapper, HasDatasets):
- def __init__(self, job_working_directory, has_collection, dataset_paths=[], **kwargs):
+ def __init__(self, job_working_directory, has_collection, **kwargs):
super(DatasetCollectionWrapper, self).__init__()
self.job_working_directory = job_working_directory
self._dataset_elements_cache = {}
- self.dataset_paths = dataset_paths
self.kwargs = kwargs
if has_collection is None:
@@ -427,9 +450,9 @@ def __init__(self, job_working_directory, has_collection, dataset_paths=[], **kw
element_identifier = dataset_collection_element.element_identifier
if dataset_collection_element.is_collection:
- element_wrapper = DatasetCollectionWrapper(job_working_directory, dataset_collection_element, dataset_paths, **kwargs)
+ element_wrapper = DatasetCollectionWrapper(job_working_directory, dataset_collection_element, **kwargs)
else:
- element_wrapper = self._dataset_wrapper(element_object, dataset_paths, identifier=element_identifier, **kwargs)
+ element_wrapper = self._dataset_wrapper(element_object, identifier=element_identifier, **kwargs)
element_instances[element_identifier] = element_wrapper
element_instance_list.append(element_wrapper)
@@ -443,7 +466,7 @@ def get_datasets_for_group(self, group):
wrappers = []
for element in self.collection.dataset_elements:
if any([t for t in element.dataset_instance.tags if t.user_tname.lower() == 'group' and t.value.lower() == group]):
- wrappers.append(self._dataset_wrapper(element.element_object, self.dataset_paths, identifier=element.element_identifier, **self.kwargs))
+ wrappers.append(self._dataset_wrapper(element.element_object, identifier=element.element_identifier, **self.kwargs))
self._dataset_elements_cache[group] = wrappers
return self._dataset_elements_cache[group]
diff --git a/lib/galaxy/util/plugin_config.py b/lib/galaxy/util/plugin_config.py
index 894601e7f752..c15e47f61ef5 100644
--- a/lib/galaxy/util/plugin_config.py
+++ b/lib/galaxy/util/plugin_config.py
@@ -1,3 +1,4 @@
+import collections
from xml.etree import ElementTree
try:
@@ -8,6 +9,9 @@
from galaxy.util.submodules import import_submodules
+PluginConfigSource = collections.namedtuple('PluginConfigSource', ['type', 'source'])
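+# The source type is either 'xml' or 'dict'; source holds the parsed XML root element or the data loaded from a YAML file.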
+
+
def plugins_dict(module, plugin_type_identifier):
""" Walk through all classes in submodules of module and find ones labelled
with specified plugin_type_identifier and throw in a dictionary to allow
@@ -33,11 +37,10 @@ def plugins_dict(module, plugin_type_identifier):
def load_plugins(plugins_dict, plugin_source, extra_kwds=None, plugin_type_keys=('type',)):
if extra_kwds is None:
extra_kwds = {}
- source_type, source = plugin_source
- if source_type == "xml":
- return __load_plugins_from_element(plugins_dict, source, extra_kwds)
+ if plugin_source.type == "xml":
+ return __load_plugins_from_element(plugins_dict, plugin_source.source, extra_kwds)
else:
- return __load_plugins_from_dicts(plugins_dict, source, extra_kwds, plugin_type_keys=plugin_type_keys)
+ return __load_plugins_from_dicts(plugins_dict, plugin_source.source, extra_kwds, plugin_type_keys=plugin_type_keys)
def __load_plugins_from_element(plugins_dict, plugins_element, extra_kwds):
@@ -80,9 +83,9 @@ def __load_plugins_from_dicts(plugins_dict, configs, extra_kwds, plugin_type_key
def plugin_source_from_path(path):
if path.endswith(".yaml") or path.endswith(".yml") or path.endswith(".yaml.sample") or path.endswith(".yml.sample"):
- return ('dict', __read_yaml(path))
+ return PluginConfigSource('dict', __read_yaml(path))
else:
- return ('xml', ElementTree.parse(path).getroot())
+ return PluginConfigSource('xml', ElementTree.parse(path).getroot())
def __read_yaml(path):
diff --git a/lib/galaxy/util/tool_shed/xml_util.py b/lib/galaxy/util/tool_shed/xml_util.py
index 5e7e9db05e4a..390c1fcf4283 100644
--- a/lib/galaxy/util/tool_shed/xml_util.py
+++ b/lib/galaxy/util/tool_shed/xml_util.py
@@ -76,10 +76,10 @@ def create_element(tag, attributes=None, sub_elements=None):
return None
-def parse_xml(file_name):
+def parse_xml(file_name, check_exists=True):
"""Returns a parsed xml tree with comments intact."""
error_message = ''
- if not os.path.exists(file_name):
+ if check_exists and not os.path.exists(file_name):
return None, "File does not exist %s" % str(file_name)
with open(file_name, 'r') as fobj:
diff --git a/lib/galaxy/util/watcher.py b/lib/galaxy/util/watcher.py
index 5ae9e41f6bb3..0fe0c7216a59 100644
--- a/lib/galaxy/util/watcher.py
+++ b/lib/galaxy/util/watcher.py
@@ -71,11 +71,22 @@ def __init__(self, observer_class, even_handler_class, **kwargs):
self.observer = None
self.observer_class = observer_class
self.event_handler = even_handler_class(self)
+ self.monitored_dirs = {}
def start(self):
if self.observer is None:
self.observer = self.observer_class()
self.observer.start()
+ self.resume_watching()
+
+ def monitor(self, dir_path, recursive=False):
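+ # Remember the directory so watching can be resumed after an observer restart; schedule it immediately if an observer is already running.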
+ self.monitored_dirs[dir_path] = recursive
+ if self.observer is not None:
+ self.observer.schedule(self.event_handler, dir_path, recursive=recursive)
+
+ def resume_watching(self):
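+ # Re-register all previously monitored directories with a freshly started observer.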
+ for dir_path, recursive in self.monitored_dirs.items():
+ self.monitor(dir_path, recursive)
def shutdown(self):
if self.observer is not None:
@@ -88,7 +99,6 @@ class Watcher(BaseWatcher):
def __init__(self, observer_class, event_handler_class, **kwargs):
super(Watcher, self).__init__(observer_class, event_handler_class, **kwargs)
- self.monitored_dirs = {}
self.path_hash = {}
self.file_callbacks = {}
self.dir_callbacks = {}
@@ -96,16 +106,12 @@ def __init__(self, observer_class, event_handler_class, **kwargs):
self.require_extensions = {}
self.event_handler = event_handler_class(self)
- def monitor(self, dir, recursive=False):
- self.observer.schedule(self.event_handler, dir, recursive=recursive)
-
def watch_file(self, file_path, callback=None):
file_path = os.path.abspath(file_path)
dir_path = os.path.dirname(file_path)
if dir_path not in self.monitored_dirs:
if callback is not None:
self.file_callbacks[file_path] = callback
- self.monitored_dirs[dir_path] = dir_path
self.monitor(dir_path)
log.debug("Watching for changes to file: %s", file_path)
@@ -118,7 +124,6 @@ def watch_directory(self, dir_path, callback=None, recursive=False, ignore_exten
self.ignore_extensions[dir_path] = ignore_extensions
if require_extensions:
self.require_extensions[dir_path] = require_extensions
- self.monitored_dirs[dir_path] = dir_path
self.monitor(dir_path, recursive=recursive)
log.debug("Watching for changes in directory%s: %s", ' (recursively)' if recursive else '', dir_path)
diff --git a/lib/galaxy/web/framework/decorators.py b/lib/galaxy/web/framework/decorators.py
index 14779e5d501c..9a983edfcb00 100644
--- a/lib/galaxy/web/framework/decorators.py
+++ b/lib/galaxy/web/framework/decorators.py
@@ -239,7 +239,7 @@ def decorator(self, trans, *args, **kwargs):
# error if anon and no session
if not trans.galaxy_session and user_or_session_required:
return __api_error_response(trans, status_code=403, err_code=error_codes.USER_NO_API_KEY,
- err_msg="API authentication required for this request")
+ err_msg="API authentication or Galaxy session required for this request")
if trans.request.body:
try:
diff --git a/lib/galaxy/webapps/galaxy/api/histories.py b/lib/galaxy/webapps/galaxy/api/histories.py
index 72c17dbcab73..7485385528a6 100644
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -219,6 +219,9 @@ def show(self, trans, id, deleted='False', **kwd):
@expose_api_anonymous
def citations(self, trans, history_id, **kwd):
"""
+ GET /api/histories/{id}/citations
+ Return all the citations for the tools used to produce the datasets in
+ the history.
"""
history = self.manager.get_accessible(self.decode_id(history_id), trans.user, current_history=trans.history)
tool_ids = set([])
@@ -256,8 +259,7 @@ def published(self, trans, **kwd):
rval.append(history_dict)
return rval
- # TODO: does this need to be anonymous_and_sessionless? Not just expose_api?
- @expose_api_anonymous_and_sessionless
+ @expose_api
def shared_with_me(self, trans, **kwd):
"""
shared_with_me( self, trans, **kwd )
@@ -443,7 +445,7 @@ def update(self, trans, id, payload, **kwd):
@expose_api
def archive_export(self, trans, id, **kwds):
"""
- export_archive( self, trans, id, payload )
+ export_archive(self, trans, id, payload)
* PUT /api/histories/{id}/exports:
start job (if needed) to create history export for corresponding
history.
diff --git a/lib/galaxy/webapps/galaxy/api/history_contents.py b/lib/galaxy/webapps/galaxy/api/history_contents.py
index 3b3bb58245e1..d4e8c395c2f2 100644
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -181,7 +181,7 @@ def show(self, trans, id, history_id, **kwd):
def index_jobs_summary(self, trans, history_id, **kwd):
"""
* GET /api/histories/{history_id}/jobs_summary
- return detailed information about an HDA or HDCAs jobs
+ return job state summary info for jobs, implicit groups of jobs for collections, or workflow invocations
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
@@ -189,13 +189,13 @@ def index_jobs_summary(self, trans, history_id, **kwd):
efficient as possible.
:type history_id: str
- :param history_id: encoded id string of the HDA's or the HDCA's History
+ :param history_id: encoded id string of the target history
:type ids: str[]
:param ids: the encoded ids of job summary objects to return - if ids
is specified types must also be specified and have same length.
:type types: str[]
- :param types: type of object represented by elements in the ids array - either
- Job or ImplicitCollectionJob.
+ :param types: type of object represented by elements in the ids array - any of
+ Job, ImplicitCollectionJob, or WorkflowInvocation.
:rtype: dict[]
:returns: an array of job summary object dictionaries.
@@ -207,9 +207,9 @@ def index_jobs_summary(self, trans, history_id, **kwd):
# TODO: ...
pass
else:
- ids = util.listify(ids)
+ ids = [self.app.security.decode_id(i) for i in util.listify(ids)]
types = util.listify(types)
- return [self.encode_all_ids(trans, s) for s in fetch_job_states(self.app, trans.sa_session, ids, types)]
+ return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
@expose_api_anonymous
def show_jobs_summary(self, trans, id, history_id, **kwd):
diff --git a/lib/galaxy/webapps/galaxy/api/toolshed.py b/lib/galaxy/webapps/galaxy/api/toolshed.py
index f16c03862b4d..8a41c42c9ae9 100644
--- a/lib/galaxy/webapps/galaxy/api/toolshed.py
+++ b/lib/galaxy/webapps/galaxy/api/toolshed.py
@@ -335,6 +335,9 @@ def request(self, trans, **params):
pathspec.append(params.pop("id"))
if "action" in params:
pathspec.append(params.pop("action"))
- return json.loads(util.url_get(tool_shed_url, params=dict(params), pathspec=pathspec))
+ try:
+ return json.loads(util.url_get(tool_shed_url, params=dict(params), pathspec=pathspec))
+ except Exception as e:
+ raise MessageException("Invalid server response. %s." % str(e))
else:
raise MessageException("Invalid toolshed url.")
diff --git a/lib/galaxy/webapps/galaxy/api/workflows.py b/lib/galaxy/webapps/galaxy/api/workflows.py
index 6fa7643ff38f..b81f0afefaa7 100644
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -22,6 +22,7 @@
histories,
workflows
)
+from galaxy.managers.jobs import fetch_job_states, invocation_job_source_iter
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
@@ -139,13 +140,14 @@ def get_workflows_list(self, trans, kwd):
if show_published:
filter1 = or_(filter1, (trans.app.model.StoredWorkflow.published == true()))
for wf in trans.sa_session.query(trans.app.model.StoredWorkflow).options(
+ joinedload("annotations")).options(
joinedload("latest_workflow").undefer("step_count").lazyload("steps")).options(
joinedload("tags")).filter(
filter1, trans.app.model.StoredWorkflow.table.c.deleted == false()).order_by(
desc(trans.app.model.StoredWorkflow.table.c.update_time)).all():
-
item = wf.to_dict(value_mapper={'id': trans.security.encode_id})
encoded_id = trans.security.encode_id(wf.id)
+ item['annotations'] = [x.annotation for x in wf.annotations]
item['url'] = url_for('workflow', id=encoded_id)
item['owner'] = wf.user.username
item['number_of_steps'] = wf.latest_workflow.step_count
@@ -155,6 +157,7 @@ def get_workflows_list(self, trans, kwd):
rval.append(item)
for wf_sa in trans.sa_session.query(model.StoredWorkflowUserShareAssociation).join(
model.StoredWorkflowUserShareAssociation.stored_workflow).options(
+ joinedload("stored_workflow").joinedload("annotations")).options(
joinedload("stored_workflow").joinedload("latest_workflow").undefer("step_count").lazyload("steps")).options(
joinedload("stored_workflow").joinedload("user")).options(
joinedload("stored_workflow").joinedload("tags")).filter(model.StoredWorkflowUserShareAssociation.user == trans.user).filter(
@@ -162,6 +165,7 @@ def get_workflows_list(self, trans, kwd):
desc(model.StoredWorkflow.update_time)).all():
item = wf_sa.stored_workflow.to_dict(value_mapper={'id': trans.security.encode_id})
encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
+ item['annotations'] = [x.annotation for x in wf_sa.stored_workflow.annotations]
item['url'] = url_for('workflow', id=encoded_id)
item['slug'] = wf_sa.stored_workflow.slug
item['owner'] = wf_sa.stored_workflow.user.username
@@ -650,6 +654,7 @@ def __api_import_new_workflow(self, trans, payload, **kwd):
# api encoded, id
encoded_id = trans.security.encode_id(workflow_id)
item = workflow.to_dict(value_mapper={'id': trans.security.encode_id})
+ item['annotations'] = [x.annotation for x in workflow.annotations]
item['url'] = url_for('workflow', id=encoded_id)
item['owner'] = workflow.user.username
item['number_of_steps'] = len(workflow.latest_workflow.steps)
@@ -812,19 +817,19 @@ def index_invocations(self, trans, workflow_id=None, **kwd):
# Get all invocation if user is admin
user_id = None
+ include_terminal = util.string_as_bool(kwd.get("include_terminal", True))
invocations = self.workflow_manager.build_invocations_query(
- trans, stored_workflow_id=stored_workflow_id, history_id=history_id, user_id=user_id
+ trans, stored_workflow_id=stored_workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal
)
return self.workflow_manager.serialize_workflow_invocations(invocations, **kwd)
@expose_api
- def show_invocation(self, trans, workflow_id, invocation_id, **kwd):
+ def show_invocation(self, trans, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}
- Get detailed description of workflow invocation
+ GET /api/invocations/{invocation_id}
- :param workflow_id: the workflow id (required)
- :type workflow_id: str
+ Get detailed description of workflow invocation
:param invocation_id: the invocation id (required)
:type invocation_id: str
@@ -856,14 +861,12 @@ def show_invocation(self, trans, workflow_id, invocation_id, **kwd):
return None
@expose_api
- def cancel_invocation(self, trans, workflow_id, invocation_id, **kwd):
+ def cancel_invocation(self, trans, invocation_id, **kwd):
"""
DELETE /api/workflows/{workflow_id}/invocations/{invocation_id}
+ DELETE /api/invocations/{invocation_id}
Cancel the specified workflow invocation.
- :param workflow_id: the workflow id (required)
- :type workflow_id: str
-
:param invocation_id: the usage id (required)
:type invocation_id: str
@@ -874,9 +877,10 @@ def cancel_invocation(self, trans, workflow_id, invocation_id, **kwd):
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
- def show_invocation_report(self, trans, workflow_id, invocation_id, **kwd):
+ def show_invocation_report(self, trans, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report
+ GET /api/invocations/{invocation_id}/report
Get JSON summarizing invocation for reporting.
"""
@@ -889,12 +893,10 @@ def show_invocation_report(self, trans, workflow_id, invocation_id, **kwd):
)
@expose_api
- def invocation_step(self, trans, workflow_id, invocation_id, step_id, **kwd):
+ def invocation_step(self, trans, invocation_id, step_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
-
- :param workflow_id: the workflow id (required)
- :type workflow_id: str
+ GET /api/invocations/{invocation_id}/steps/{step_id}
:param invocation_id: the invocation id (required)
:type invocation_id: str
@@ -914,18 +916,64 @@ def invocation_step(self, trans, workflow_id, invocation_id, step_id, **kwd):
)
return self.__encode_invocation_step(trans, invocation_step)
+ @expose_api_anonymous_and_sessionless
+ def invocation_step_jobs_summary(self, trans, invocation_id, **kwd):
+ """
+ * GET /api/workflows/{workflow_id}/invocations/{invocation_id}/step_jobs_summary
+ GET /api/invocations/{invocation_id}/step_jobs_summary
+ return job state summary info aggregated per step of the workflow invocation
+
+ Warning: We allow anyone to fetch job state information about any object they
+ can guess an encoded ID for - it isn't considered protected data. This keeps
+ polling IDs as part of state calculation for large histories and collections as
+ efficient as possible.
+
+ :param invocation_id: the invocation id (required)
+ :type invocation_id: str
+
+ :rtype: dict[]
+ :returns: an array of job summary object dictionaries for each step
+ """
+ decoded_invocation_id = self.decode_id(invocation_id)
+ ids = []
+ types = []
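+ # Collect one (job source type, id) pair per invocation step so each step gets its own summary.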
+ for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
+ ids.append(job_source_id)
+ types.append(job_source_type)
+ return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
+
+ @expose_api_anonymous_and_sessionless
+ def invocation_jobs_summary(self, trans, invocation_id, **kwd):
+ """
+ * GET /api/workflows/{workflow_id}/invocations/{invocation_id}/jobs_summary
+ GET /api/invocations/{invocation_id}/jobs_summary
+ return job state summary info aggregated across all current jobs of the workflow invocation
+
+ Warning: We allow anyone to fetch job state information about any object they
+ can guess an encoded ID for - it isn't considered protected data. This keeps
+ polling IDs as part of state calculation for large histories and collections as
+ efficient as possible.
+
+ :param invocation_id: the invocation id (required)
+ :type invocation_id: str
+
+ :rtype: dict
+ :returns: a job summary object merged for all steps in workflow invocation
+ """
+ ids = [self.decode_id(invocation_id)]
+ types = ["WorkflowInvocation"]
+ return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
+
@expose_api
- def update_invocation_step(self, trans, workflow_id, invocation_id, step_id, payload, **kwd):
+ def update_invocation_step(self, trans, invocation_id, step_id, payload, **kwd):
"""
PUT /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
+ PUT /api/invocations/{invocation_id}/steps/{step_id}
+
Update state of running workflow step invocation - still very nebulous
but this would be for stuff like confirming paused steps can proceed
etc....
-
- :param workflow_id: the workflow id (required)
- :type workflow_id: str
-
:param invocation_id: the usage id (required)
:type invocation_id: str
diff --git a/lib/galaxy/webapps/galaxy/buildapp.py b/lib/galaxy/webapps/galaxy/buildapp.py
index 7f2e24cbdb85..0fa48e71d3fc 100644
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -104,6 +104,7 @@ def app_factory(global_conf, load_app_kwds={}, **kwargs):
webapp.add_client_route('/admin/data_tables', 'admin')
webapp.add_client_route('/admin/data_types', 'admin')
webapp.add_client_route('/admin/jobs', 'admin')
+ webapp.add_client_route('/admin/invocations', 'admin')
webapp.add_client_route('/admin/data_manager{path_info:.*}', 'admin')
webapp.add_client_route('/admin/error_stack', 'admin')
webapp.add_client_route('/admin/users', 'admin')
@@ -518,6 +519,14 @@ def populate_api_routes(webapp, app):
webapp.mapper.connect('/api/datasets/{dataset_id}/converted/{ext}', controller='datasets', action='converted')
webapp.mapper.connect('/api/datasets/{dataset_id}/permissions', controller='datasets', action='update_permissions', conditions=dict(method=["PUT"]))
+ webapp.mapper.connect(
+ 'list_invocations',
+ '/api/invocations',
+ controller='workflows',
+ action='index_invocations',
+ conditions=dict(method=['GET'])
+ )
+
# API refers to usages and invocations - these mean the same thing but the
# usage routes should be considered deprecated.
invoke_names = {
@@ -534,60 +543,48 @@ def populate_api_routes(webapp, app):
conditions=dict(method=['GET'])
)
webapp.mapper.connect(
- 'list_invocations',
- '/api/invocations',
- controller='workflows',
- action='index_invocations',
- conditions=dict(method=['GET'])
- )
-
- webapp.mapper.connect(
- 'workflow_%s_contents' % name,
- '/api/workflows/{workflow_id}/%s/{invocation_id}' % noun,
+ 'workflow_%s' % name,
+ '/api/workflows/{workflow_id}/%s' % noun,
controller='workflows',
- action='show_invocation',
- conditions=dict(method=['GET'])
+ action='invoke',
+ conditions=dict(method=['POST'])
)
+ def connect_invocation_endpoint(endpoint_name, endpoint_suffix, action, conditions=None):
+ # /api/invocations/<invocation_id>
+ # /api/workflows/<workflow_id>/invocations/<invocation_id>
+ # /api/workflows/<workflow_id>/usage/<invocation_id> (deprecated)
+ conditions = conditions or dict(method=['GET'])
webapp.mapper.connect(
- 'workflow_%s_report' % name,
- '/api/workflows/{workflow_id}/%s/{invocation_id}/report' % noun,
+ 'workflow_invocation_%s' % endpoint_name,
+ '/api/workflows/{workflow_id}/invocations/{invocation_id}' + endpoint_suffix,
controller='workflows',
- action='show_invocation_report',
- conditions=dict(method=['GET'])
+ action=action,
+ conditions=conditions,
)
-
webapp.mapper.connect(
- 'cancel_workflow_%s' % name,
- '/api/workflows/{workflow_id}/%s/{invocation_id}' % noun,
+ 'workflow_usage_%s' % endpoint_name,
+ '/api/workflows/{workflow_id}/usage/{invocation_id}' + endpoint_suffix,
controller='workflows',
- action='cancel_invocation',
- conditions=dict(method=['DELETE'])
+ action=action,
+ conditions=conditions,
)
-
webapp.mapper.connect(
- 'workflow_%s_step' % name,
- '/api/workflows/{workflow_id}/%s/{invocation_id}/steps/{step_id}' % noun,
+ 'invocation_%s' % endpoint_name,
+ '/api/invocations/{invocation_id}' + endpoint_suffix,
controller='workflows',
- action='invocation_step',
- conditions=dict(method=['GET'])
+ action=action,
+ conditions=conditions,
)
- webapp.mapper.connect(
- 'workflow_%s_step_update' % name,
- '/api/workflows/{workflow_id}/%s/{invocation_id}/steps/{step_id}' % noun,
- controller='workflows',
- action='update_invocation_step',
- conditions=dict(method=['PUT'])
- )
+ connect_invocation_endpoint('show', '', action='show_invocation')
+ connect_invocation_endpoint('show_report', '/report', action='show_invocation_report')
+ connect_invocation_endpoint('jobs_summary', '/jobs_summary', action='invocation_jobs_summary')
+ connect_invocation_endpoint('step_jobs_summary', '/step_jobs_summary', action='invocation_step_jobs_summary')
+ connect_invocation_endpoint('cancel', '', action='cancel_invocation', conditions=dict(method=['DELETE']))
+ connect_invocation_endpoint('show_step', '/steps/{step_id}', action='invocation_step')
+ connect_invocation_endpoint('update_step', '/steps/{step_id}', action='update_invocation_step', conditions=dict(method=['PUT']))
- webapp.mapper.connect(
- 'workflow_%s' % name,
- '/api/workflows/{workflow_id}/%s' % noun,
- controller='workflows',
- action='invoke',
- conditions=dict(method=['POST'])
- )
# ============================
# ===== AUTHENTICATE API =====
# ============================
diff --git a/lib/galaxy/webapps/galaxy/config_schema.yml b/lib/galaxy/webapps/galaxy/config_schema.yml
index 23b3c7b36b6c..02cb6f9f2f94 100644
--- a/lib/galaxy/webapps/galaxy/config_schema.yml
+++ b/lib/galaxy/webapps/galaxy/config_schema.yml
@@ -45,7 +45,6 @@ mapping:
database_connection:
type: str
- default: sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
required: false
desc: |
By default, Galaxy uses a SQLite database at 'database/universe.sqlite'. You
@@ -53,6 +52,8 @@ mapping:
instead. This string takes many options which are explained in detail in the
config file documentation.
+ Sample default 'sqlite:///<data_dir>/universe.sqlite?isolation_level=IMMEDIATE'
+
database_engine_option_pool_size:
type: int
default: 5
@@ -107,7 +108,7 @@ mapping:
slow_query_log_threshold:
type: float
- default: 0
+ default: 0.0
required: false
desc: |
Slow query logging. Queries slower than the threshold indicated below will
@@ -126,7 +127,6 @@ mapping:
install_database_connection:
type: str
- default: sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
required: false
desc: |
By default, Galaxy will use the same database to track user data and
@@ -136,6 +136,8 @@ mapping:
separate the tool shed install database (all other options listed above
but prefixed with install_ are also available).
+ Defaults to the value of the 'database_connection' option.
+
database_auto_migrate:
type: bool
default: false
@@ -168,29 +170,51 @@ mapping:
file_path:
type: str
- default: database/files
+ default: files
required: false
desc: |
- Where dataset files are stored. It must accessible at the same path on any cluster
+ Where dataset files are stored. It must be accessible at the same path on any cluster
nodes that will run Galaxy jobs, unless using Pulsar.
+ Default value will be resolved to 'database/files' where 'database' is the default value
+ of the 'data_dir' option.
+
new_file_path:
type: str
- default: database/tmp
+ default: tmp
required: false
desc: |
- Where temporary files are stored. It must accessible at the same path on any cluster
+ Where temporary files are stored. It must be accessible at the same path on any cluster
nodes that will run Galaxy jobs, unless using Pulsar.
+ Default value will be resolved to 'database/tmp' where 'database' is the default value
+ of the 'data_dir' option.
+
tool_config_file:
- type: str
- default: config/tool_conf.xml,config/shed_tool_conf.xml
+ type: any
+ default: config/tool_conf.xml
required: false
desc: |
Tool config files, defines what tools are available in Galaxy.
Tools can be locally developed or installed from Galaxy tool sheds.
(config/tool_conf.xml.sample will be used if left unset and
- config/tool_conf.xml does not exist).
+ config/tool_conf.xml does not exist). Can be a single file, a list of
+ files, or (for backwards compatibility) a comma-separated list of files.
+
+ shed_tool_config_file:
+ type: str
+ default: config/shed_tool_conf.xml
+ required: false
+ desc: |
+ Tool config file for tools installed from the Galaxy Tool Shed. Must
+ be writable by Galaxy and generally should not be edited by hand. In
+ older Galaxy releases, this file was part of the tool_config_file
+ option. It is still possible to specify this file (and other
+ shed-enabled tool config files) in tool_config_file, but in the
+ standard case of a single shed-enabled tool config, this option is
+ preferable. This file will be created automatically upon tool
+ installation, whereas Galaxy will fail to start if any files in
+ tool_config_file cannot be read.
check_migrate_tools:
type: bool
@@ -205,13 +229,16 @@ mapping:
migrated_tools_config:
type: str
- default: config/migrated_tools_conf.xml
+ default: migrated_tools_conf.xml
required: false
desc: |
Tool config maintained by tool migration scripts. If you use the migration
scripts to install tools that have been migrated to the tool shed upon a new
release, they will be added to this tool config file.
+ Default value will be resolved to 'config/migrated_tools_conf.xml' where 'config' is the
+ default value of the 'config_dir' option.
+
integrated_tool_panel_config:
type: str
default: integrated_tool_panel.xml
@@ -233,17 +260,20 @@ mapping:
tool_dependency_dir:
type: str
- default: database/dependencies
+ default: dependencies
required: false
desc: |
Various dependency resolver configuration parameters will have defaults set relative
to this path, such as the default conda prefix, default Galaxy packages path, legacy
tool shed dependencies path, and the dependency cache directory.
- Set the string to None to explicitly disable tool dependency handling.
+ Set the string to null to explicitly disable tool dependency handling.
If this option is set to none or an invalid path, installing tools with dependencies
from the Tool Shed or in Conda will fail.
+ Default value will be resolved to 'database/dependencies' where 'database' is the default
+ value of the 'data_dir' option.
+
dependency_resolvers_config_file:
type: str
default: config/dependency_resolvers_conf.xml
@@ -258,12 +288,12 @@ mapping:
conda_prefix:
type: str
- default: <tool_dependency_dir>/_conda
required: false
desc: |
- conda_prefix is the location on the filesystem where Conda packages and environments are installed
- IMPORTANT: Due to a current limitation in conda, the total length of the
- conda_prefix and the job_working_directory path should be less than 50 characters!
+ conda_prefix is the location on the filesystem where Conda packages and environments are
+ installed.
+
+ Sample default '<tool_dependency_dir>/_conda'
conda_exec:
type: str
@@ -339,11 +369,12 @@ mapping:
tool_dependency_cache_dir:
type: str
- default: <tool_dependency_dir>/_cache
required: false
desc: |
By default the tool_dependency_cache_dir is the _cache directory
- of the tool dependency directory
+ of the tool dependency directory.
+
+ Sample default '<tool_dependency_dir>/_cache'
precache_dependencies:
type: bool
@@ -429,7 +460,6 @@ mapping:
involucro_path:
type: str
- default: database/dependencies/involucro
required: false
desc: |
involucro is a tool used to build Docker or Singularity containers for tools from Conda
@@ -438,6 +468,8 @@ mapping:
container resolver isn't enabled, and will install on demand unless
involucro_auto_init is set to false.
+ Sample default '<tool_dependency_dir>/involucro'
+
involucro_auto_init:
type: bool
default: true
@@ -514,11 +546,10 @@ mapping:
shed_tool_data_path:
type: str
- default: tool-data
required: false
desc: |
- Directory where Tool Data Table related files will be placed
- when installed from a ToolShed. Defaults to tool_data_path.
+ Directory where Tool Data Table related files will be placed when installed from a
+ ToolShed. Defaults to the value of the 'tool_data_path' option.
watch_tool_data_dir:
type: str
@@ -535,17 +566,19 @@ mapping:
builds_file_path:
type: str
- default: tool-data/shared/ucsc/builds.txt
+ default: shared/ucsc/builds.txt
required: false
desc: |
- File containing old-style genome builds
+ File containing old-style genome builds. Value will be resolved with respect to
+ <tool_data_path>.
len_file_path:
type: str
- default: tool-data/shared/ucsc/chrom
+ default: shared/ucsc/chrom
required: false
desc: |
- Directory where chrom len files are kept, currently mainly used by trackster
+ Directory where chrom len files are kept, currently mainly used by trackster. Value will
+ be resolved with respect to <tool_data_path>.
datatypes_config_file:
type: str
@@ -567,7 +600,6 @@ mapping:
With this option set to false the compressed datatypes will be unpacked
before sniffing.
-
datatypes_disable_auto:
type: bool
default: false
@@ -582,8 +614,7 @@ mapping:
desc: |
Visualizations config directory: where to look for individual visualization
plugins. The path is relative to the Galaxy root dir. To use an absolute
- path begin the path with '/'. This is a comma separated list.
- Defaults to "config/plugins/visualizations".
+ path begin the path with '/'. This is a comma-separated list.
interactive_environment_plugins_directory:
type: str
@@ -594,31 +625,7 @@ mapping:
config/plugins/interactive_environments to load Galaxy's stock plugins.
These will require Docker to be configured and have security considerations,
so proceed with caution. The path is relative to the Galaxy root dir. To use
- an absolute path begin the path with '/'. This is a comma
- separated list.
-
- interactive_environment_swarm_mode:
- type: bool
- default: false
- required: false
- desc: |
- To run interactive environment containers in Docker Swarm mode (on an
- existing swarm), set this option to true and set `docker_connect_port` in the
- IE plugin config (ini) file(s) of any IE plugins you have enabled and ensure
- that you are not using any `docker run`-specific options in your plugins'
- `command_inject` options (swarm mode services run using `docker service
- create`, which has a different and more limited set of options). This option
- can be overridden on a per-plugin basis by using the `swarm_mode` option in
- the plugin's ini config file.
-
- swarm_manager_config_file:
- type: str
- default: config/swarm_manager_conf.yml
- required: false
- desc: |
- Galaxy can run a "swarm manager" service that will monitor utilization of the
- swarm and provision/deprovision worker nodes as necessary. The service has
- its own configuration file.
+ an absolute path begin the path with '/'. This is a comma-separated list.
tour_config_dir:
type: str
@@ -644,29 +651,37 @@ mapping:
job_working_directory:
type: str
- default: database/jobs_directory
+ default: jobs_directory
required: false
desc: |
Each job is given a unique empty directory as its current working directory.
This option defines in what parent directory those directories will be
created.
+ Default value will be resolved to 'database/jobs_directory' where 'database' is the
+ default value of the 'data_dir' option.
+
cluster_files_directory:
type: str
- default: database/pbs
+ default: pbs
required: false
desc: |
If using a cluster, Galaxy will write job scripts and stdout/stderr to this
directory.
+ Value will be resolved with respect to <data_dir>.
+
template_cache_path:
type: str
- default: database/compiled_templates
+ default: compiled_templates
required: false
desc: |
Mako templates are compiled as needed and cached for reuse, this directory is
used for the cache
+ Default value will be resolved to 'database/compiled_templates' where 'database' is the
+ default value of the 'data_dir' option.
+
check_job_script_integrity:
type: bool
default: true
@@ -712,22 +727,28 @@ mapping:
citation_cache_data_dir:
type: str
- default: database/citations/data
+ default: citations/data
required: false
desc: |
Citation related caching. Tool citations information maybe fetched from
external sources such as https://doi.org/ by Galaxy - the following
parameters can be used to control the caching used to store this information.
+ Default value will be resolved to 'database/citations/data' where 'database' is the
+ default value of the 'data_dir' option.
+
citation_cache_lock_dir:
type: str
- default: database/citations/lock
+ default: citations/locks
required: false
desc: |
Citation related caching. Tool citations information maybe fetched from
external sources such as https://doi.org/ by Galaxy - the following
parameters can be used to control the caching used to store this information.
+ Default value will be resolved to 'database/citations/locks' where 'database' is the
+ default value of the 'data_dir' option.
+
object_store_config_file:
type: str
default: config/object_store_conf.xml
@@ -779,13 +800,14 @@ mapping:
mailing_join_addr:
type: str
- default: galaxy-announce-join@bx.psu.edu
required: false
desc: |
On the user registration form, users may choose to join a mailing list. This
is the address used to subscribe to the list. Uncomment and leave empty if you
want to remove this option from the user registration form.
+ Example value 'galaxy-announce-join@bx.psu.edu'
+
error_email_to:
type: str
required: false
@@ -807,24 +829,29 @@ mapping:
instance_resource_url:
type: str
- default: https://galaxyproject.org/
required: false
desc: |
URL of the support resource for the galaxy instance. Used in activation
emails.
+ Example value 'https://galaxyproject.org/'
+
blacklist_file:
type: str
- default: config/disposable_email_blacklist.conf
required: false
desc: |
E-mail domains blacklist is used for filtering out users that are using
disposable email address during the registration. If their address domain
matches any domain in the blacklist, they are refused the registration.
+ Example value 'config/disposable_email_blacklist.conf'
+
registration_warning_message:
type: str
- default: Please register only one account - we provide this service free of charge and have limited computational resources. Multi-accounts are tracked and will be subjected to account termination and data deletion.
+ default: >-
+ Please register only one account - we provide this service free of charge and have limited
+ computational resources. Multi-accounts are tracked and will be subjected to account
+ termination and data deletion.
required: false
desc: |
Registration warning message is used to discourage people from registering
@@ -852,7 +879,10 @@ mapping:
inactivity_box_content:
type: str
- default: Your account has not been activated yet. Feel free to browse around and see what's available, but you won't be able to upload data or run jobs until you have verified your email address.
+ default: >-
+ Your account has not been activated yet. Feel free to browse around and see what's
+ available, but you won't be able to upload data or run jobs until you have verified your
+ email address.
required: false
desc: |
Shown in warning box to users that were not activated yet.
@@ -1306,12 +1336,15 @@ mapping:
dynamic_proxy_session_map:
type: str
- default: database/session_map.sqlite
+ default: session_map.sqlite
required: false
desc: |
The NodeJS dynamic proxy can use an SQLite database or a JSON file for IPC,
set that here.
+ Default value will be resolved to 'database/session_map.sqlite' where 'database' is the
+ default value of the 'data_dir' option.
+
dynamic_proxy_bind_port:
type: int
default: 8800
@@ -1430,14 +1463,14 @@ mapping:
log_events:
type: bool
- default: true
+ default: false
required: false
desc: |
Turn on logging of application events and some user events to the database.
log_actions:
type: bool
- default: true
+ default: false
required: false
desc: |
Turn on logging of user actions to the database. Actions currently logged
@@ -1504,7 +1537,7 @@ mapping:
desc: |
Return a Access-Control-Allow-Origin response header that matches the Origin
header of the request if that Origin hostname matches one of the strings or
- regular expressions listed here. This is a comma separated list of hostname
+ regular expressions listed here. This is a comma-separated list of hostname
strings or regular expressions beginning and ending with /.
E.g. mysite.com,google.com,usegalaxy.org,/^[\w\.]*example\.com/
See: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
@@ -1593,12 +1626,13 @@ mapping:
heartbeat_log:
type: str
- default: heartbeat_{server_name}.log
required: false
desc: |
Heartbeat log filename. Can accept the template variables {server_name} and
{pid}
+ Sample default 'heartbeat_{server_name}.log'
+
sentry_dsn:
type: str
required: false
@@ -2050,7 +2084,7 @@ mapping:
prevent users making requests to services which the administrator did not
intend to expose. Previously, you could request any network service that
Galaxy might have had access to, even if the user could not normally access it.
- It should be a comma separated list of IP addresses or IP address/mask, e.g.
+ It should be a comma-separated list of IP addresses or IP address/mask, e.g.
10.10.10.10,10.0.1.0/24,fd00::/8
enable_beta_gdpr:
@@ -2202,7 +2236,6 @@ mapping:
api_allow_run_as:
type: str
- default: ''
required: false
desc: |
Optional list of email addresses of API users who can make calls on behalf of
@@ -2226,11 +2259,14 @@ mapping:
openid_consumer_cache_path:
type: str
- default: database/openid_consumer_cache
+ default: openid_consumer_cache
required: false
desc: |
If OpenID is enabled, consumer cache directory to use.
+ Default value will be resolved to 'database/openid_consumer_cache' where 'database' is the
+ default value of the 'data_dir' option.
+
enable_tool_tags:
type: bool
default: false
@@ -2383,6 +2419,8 @@ mapping:
dependency_resolvers:
type: seq
+ sequence:
+ - type: any
desc: |
Rather than specifying a dependency_resolvers_config_file, the definition of the
resolvers to enable can be embedded into Galaxy's config with this option.
@@ -2409,6 +2447,7 @@ mapping:
default: '<tool_dependency_dir>/_cache'
desc: See description of 'tool_dependency_cache_dir'.
default_base_path:
+ type: str
default: database/dependencies
desc: See description of 'tool_dependency_dir'.
resolvers:
@@ -2541,16 +2580,16 @@ mapping:
drmaa_external_runjob_script:
type: str
- default: sudo -E scripts/drmaa_external_runner.py --assign_all_groups
required: false
desc: |
When running DRMAA jobs as the Galaxy user
(https://docs.galaxyproject.org/en/master/admin/cluster.html#submitting-jobs-as-the-real-user)
this script is used to run the job script Galaxy generates for a tool execution.
+ Example value 'sudo -E scripts/drmaa_external_runner.py --assign_all_groups'
+
drmaa_external_killjob_script:
type: str
- default: sudo -E scripts/drmaa_external_killer.py
required: false
desc: |
When running DRMAA jobs as the Galaxy user
@@ -2558,9 +2597,10 @@ mapping:
this script is used to kill such jobs by Galaxy (e.g. if the user cancels
the job).
+ Example value 'sudo -E scripts/drmaa_external_killer.py'
+
external_chown_script:
type: str
- default: sudo -E scripts/external_chown_script.py
required: false
desc: |
When running DRMAA jobs as the Galaxy user
@@ -2568,6 +2608,8 @@ mapping:
this script is used transfer permissions back and forth between the Galaxy user
and the user that is running the job.
+ Example value 'sudo -E scripts/external_chown_script.py'
+
real_system_username:
type: str
default: user_email
@@ -2700,7 +2742,7 @@ mapping:
toolbox_filter_base_modules:
type: str
- default: galaxy.tools.toolbox.filters,galaxy.tools.filters
+ default: galaxy.tools.filters,galaxy.tools.toolbox.filters
required: false
desc: |
The base module(s) that are searched for modules for toolbox filtering
diff --git a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
index 0f3bf04d6328..6aa82bfd19b2 100644
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -1129,6 +1129,8 @@ def reinstall_repository(self, trans, **kwd):
install_repository_dependencies = CheckboxField.is_checked(kwd.get('install_repository_dependencies', ''))
install_tool_dependencies = CheckboxField.is_checked(kwd.get('install_tool_dependencies', ''))
install_resolver_dependencies = CheckboxField.is_checked(kwd.get('install_resolver_dependencies', ''))
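+ # Reinstallation needs a writable shed-enabled tool config file to register the repository's tools into.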
+ if not suc.have_shed_tool_conf_for_install(trans.app):
+ raise Exception("No valid shed tool configuration file available, please configure one")
shed_tool_conf, tool_path, relative_install_dir = \
suc.get_tool_panel_config_tool_path_install_dir(trans.app, tool_shed_repository)
repository_clone_url = common_util.generate_clone_url_for_installed_repository(trans.app, tool_shed_repository)
diff --git a/lib/galaxy/webapps/galaxy/controllers/user.py b/lib/galaxy/webapps/galaxy/controllers/user.py
index 99db8aff2c7e..8e204ff1c249 100644
--- a/lib/galaxy/webapps/galaxy/controllers/user.py
+++ b/lib/galaxy/webapps/galaxy/controllers/user.py
@@ -7,7 +7,10 @@
from markupsafe import escape
from six.moves.urllib.parse import unquote
-from sqlalchemy import or_
+from sqlalchemy import (
+ func,
+ or_
+)
from sqlalchemy.orm.exc import NoResultFound
from galaxy import (
@@ -19,7 +22,6 @@
from galaxy.queue_worker import send_local_control_task
from galaxy.security.validate_user_input import (
validate_email,
- validate_password,
validate_publicname
)
from galaxy.web import expose_api_anonymous_and_sessionless
@@ -133,7 +135,7 @@ def __validate_login(self, trans, payload={}, **kwd):
if not login or not password:
return self.message_exception(trans, "Please specify a username and password.")
user = trans.sa_session.query(trans.app.model.User).filter(or_(
- trans.app.model.User.table.c.email == login,
+ func.lower(trans.app.model.User.table.c.email) == login.lower(),
trans.app.model.User.table.c.username == login
)).first()
log.debug("trans.app.config.auth_config_file: %s" % trans.app.config.auth_config_file)
@@ -307,12 +309,6 @@ def reset_password(self, trans, payload={}, **kwd):
return self.message_exception(trans, message)
return {"message": "Reset link has been sent to your email."}
- def __validate(self, trans, email, password, confirm, username):
- message = "\n".join([validate_email(trans, email),
- validate_password(trans, password, confirm),
- validate_publicname(trans, username)]).rstrip()
- return message
-
def __get_redirect_url(self, redirect):
if not redirect or redirect == "None":
return None
diff --git a/lib/galaxy/webapps/reports/config.py b/lib/galaxy/webapps/reports/config.py
index 7f44096bb9ac..fe536956fada 100644
--- a/lib/galaxy/webapps/reports/config.py
+++ b/lib/galaxy/webapps/reports/config.py
@@ -61,7 +61,7 @@ def __init__(self, **kwargs):
self.redact_email_in_job_name = True
self.allow_user_deletion = True
- def get(self, key, default):
+ def get(self, key, default=None):
return self.config_dict.get(key, default)
def check(self):
diff --git a/lib/galaxy/webapps/tool_shed/api/users.py b/lib/galaxy/webapps/tool_shed/api/users.py
index ba68ab249917..1216052904ee 100644
--- a/lib/galaxy/webapps/tool_shed/api/users.py
+++ b/lib/galaxy/webapps/tool_shed/api/users.py
@@ -123,13 +123,9 @@ def show(self, trans, id, **kwd):
return user_dict
def __validate(self, trans, email, password, confirm, username):
- if not username:
- return "A public user name is required in the Tool Shed."
if username in ['repos']:
- return "The term %s is a reserved word in the Tool Shed, so it cannot be used as a public user name." % username
- message = validate_email(trans, email)
- if not message:
- message = validate_password(trans, password, confirm)
- if not message and username:
- message = validate_publicname(trans, username)
+ return "The term '%s' is a reserved word in the Tool Shed, so it cannot be used as a public user name." % username
+ message = "\n".join([validate_email(trans, email),
+ validate_password(trans, password, confirm),
+ validate_publicname(trans, username)]).rstrip()
return message
diff --git a/lib/galaxy/webapps/tool_shed/app.py b/lib/galaxy/webapps/tool_shed/app.py
index 785e480b05a3..a1a1aad79985 100644
--- a/lib/galaxy/webapps/tool_shed/app.py
+++ b/lib/galaxy/webapps/tool_shed/app.py
@@ -64,6 +64,7 @@ def __init__(self, **kwd):
from galaxy.managers.citations import CitationsManager
self.citations_manager = CitationsManager(self)
# The Tool Shed makes no use of a Galaxy toolbox, but this attribute is still required.
+ self.use_tool_dependency_resolution = False
self.toolbox = tools.ToolBox([], self.config.tool_path, self)
# Initialize the Tool Shed security agent.
self.security_agent = self.model.security_agent
diff --git a/lib/galaxy/webapps/tool_shed/config.py b/lib/galaxy/webapps/tool_shed/config.py
index 52e91303fe31..0eebee43fe90 100644
--- a/lib/galaxy/webapps/tool_shed/config.py
+++ b/lib/galaxy/webapps/tool_shed/config.py
@@ -185,7 +185,7 @@ def parse_config_file_options(self, kwargs):
# Backwards compatibility for names used in too many places to fix
self.datatypes_config = self.datatypes_config_file
- def get(self, key, default):
+ def get(self, key, default=None):
return self.config_dict.get(key, default)
def get_bool(self, key, default):
diff --git a/lib/galaxy/webapps/tool_shed/controllers/user.py b/lib/galaxy/webapps/tool_shed/controllers/user.py
index 12d04e0e8409..6e6f1d8f4039 100644
--- a/lib/galaxy/webapps/tool_shed/controllers/user.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/user.py
@@ -9,8 +9,8 @@
web
)
from galaxy.security.validate_user_input import (
- transform_publicname,
validate_email,
+ validate_password,
validate_publicname
)
from galaxy.web import url_for
@@ -153,7 +153,7 @@ def create(self, trans, cntrller='user', redirect_url='', refresh_frames=[], **k
return trans.fill_template('/webapps/tool_shed/user/register.mako',
cntrller=cntrller,
email=email,
- username=transform_publicname(trans, username),
+ username=username,
subscribe_checked=subscribe_checked,
show_user_prepopulate_form=show_user_prepopulate_form,
use_panels=use_panels,
@@ -454,9 +454,9 @@ def logout(self, trans, logout_all=False, **kwd):
active_view="user")
def __validate(self, trans, email, password, confirm, username):
- # If coming from the tool shed webapp, we'll require a public user name
- if not username:
- return "A public user name is required in the tool shed."
if username in ['repos']:
- return "The term %s is a reserved word in the tool shed, so it cannot be used as a public user name." % escape(username)
- return super(User, self).__validate(trans, email, password, confirm, username)
+ return "The term '%s' is a reserved word in the Tool Shed, so it cannot be used as a public user name." % username
+ message = "\n".join([validate_email(trans, email),
+ validate_password(trans, password, confirm),
+ validate_publicname(trans, username)]).rstrip()
+ return message
diff --git a/lib/galaxy/webapps/tool_shed/model/__init__.py b/lib/galaxy/webapps/tool_shed/model/__init__.py
index 052fc043669a..e20c028b66fe 100644
--- a/lib/galaxy/webapps/tool_shed/model/__init__.py
+++ b/lib/galaxy/webapps/tool_shed/model/__init__.py
@@ -163,9 +163,9 @@ def __init__(self,
class Repository(Dictifiable):
dict_collection_visible_keys = ['id', 'name', 'type', 'remote_repository_url', 'homepage_url', 'description', 'user_id', 'private', 'deleted',
- 'times_downloaded', 'deprecated']
+ 'times_downloaded', 'deprecated', 'create_time']
dict_element_visible_keys = ['id', 'name', 'type', 'remote_repository_url', 'homepage_url', 'description', 'long_description', 'user_id', 'private',
- 'deleted', 'times_downloaded', 'deprecated']
+ 'deleted', 'times_downloaded', 'deprecated', 'create_time', 'ratings', 'reviews', 'reviewers']
file_states = Bunch(NORMAL='n',
NEEDS_MERGING='m',
MARKED_FOR_REMOVAL='r',
diff --git a/lib/galaxy/webapps/tool_shed/search/repo_search.py b/lib/galaxy/webapps/tool_shed/search/repo_search.py
index ce0a575070b9..94ab0793b7cb 100644
--- a/lib/galaxy/webapps/tool_shed/search/repo_search.py
+++ b/lib/galaxy/webapps/tool_shed/search/repo_search.py
@@ -7,7 +7,7 @@
from whoosh import scoring
from whoosh.fields import KEYWORD, Schema, STORED, TEXT
from whoosh.qparser import MultifieldParser
-from whoosh.query import And, Term
+from whoosh.query import And, Every, Term
from galaxy import exceptions
from galaxy.exceptions import ObjectNotFound
@@ -15,7 +15,6 @@
if sys.version_info > (3,):
long = int
-RESERVED_SEARCH_TERMS = ["category", "owner"]
log = logging.getLogger(__name__)
schema = Schema(
@@ -74,8 +73,13 @@ def search(self, trans, search_term, page, page_size, boosts):
:param page_size: integer defining a length of one page
:param page: integer with the number of page requested
- :returns results: dictionary containing hits themselves and the number of hits
+ :returns results: dictionary containing hits themselves and the hits summary
"""
+ log.debug('raw search query: #' + str(search_term))
+ lower_search_term = search_term.lower()
+ allow_query, search_term_without_filters = self._parse_reserved_filters(lower_search_term)
+ log.debug('term without filters: #' + str(search_term_without_filters))
+
whoosh_index_dir = trans.app.config.whoosh_index_dir
index_exists = whoosh.index.exists_in(whoosh_index_dir)
if index_exists:
@@ -92,11 +96,7 @@ def search(self, trans, search_term, page, page_size, boosts):
'remote_repository_url_B' : boosts.repo_remote_repository_url_boost,
'repo_owner_username_B' : boosts.repo_owner_username_boost,
'categories_B' : boosts.categories_boost})
-
searcher = index.searcher(weighting=repo_weighting)
-
- allow_query, search_term_without_filters = self._parse_reserved_filters(search_term)
-
parser = MultifieldParser([
'name',
'description',
@@ -105,17 +105,20 @@ def search(self, trans, search_term, page, page_size, boosts):
'remote_repository_url',
'repo_owner_username',
'categories'], schema=schema)
- user_query = parser.parse('*' + search_term_without_filters + '*')
+ # If the user query contains only filters, prevent a wildcard search.
+ if len(search_term_without_filters) < 1:
+ user_query = Every('name')
+ sortedby = 'name'
+ else:
+ user_query = parser.parse('*' + search_term_without_filters + '*')
+ sortedby = ''
try:
- hits = searcher.search_page(user_query, page, pagelen=page_size, filter=allow_query, terms=True)
+ hits = searcher.search_page(user_query, page, pagelen=page_size, filter=allow_query, terms=True, sortedby=sortedby)
+ log.debug('total hits: ' + str(len(hits)))
+ log.debug('scored hits: ' + str(hits.scored_length()))
except ValueError:
raise ObjectNotFound('The requested page does not exist.')
-
- log.debug('user search query: #' + str(search_term))
- log.debug('term without filters: #' + str(search_term_without_filters))
- log.debug('total hits: ' + str(len(hits)))
- log.debug('scored hits: ' + str(hits.scored_length()))
results = {}
results['total_results'] = str(len(hits))
results['page'] = str(page)
@@ -155,7 +158,7 @@ def _parse_reserved_filters(self, search_term):
:returns allow_query: whoosh Query object used for filtering
results of searching in index
:returns search_term_without_filters: str that represents user's
- search phrase without the wildcards
+ search phrase without the filters
>>> rs = RepoSearch()
>>> rs._parse_reserved_filters("category:assembly")
@@ -166,6 +169,10 @@ def _parse_reserved_filters(self, search_term):
(And([Term('categories', 'Climate Analysis')]), 'psy_maps')
>>> rs._parse_reserved_filters("climate category:'Climate Analysis' owner:'bjoern gruening' psy_maps")
(And([Term('categories', 'Climate Analysis'), Term('repo_owner_username', 'bjoern gruening')]), 'climate psy_maps')
+ >>> rs._parse_reserved_filters("climate category:'John Says This Fails' owner:'bjoern gruening' psy_maps")
+ (And([Term('categories', 'John Says This Fails'), Term('repo_owner_username', 'bjoern gruening')]), 'climate psy_maps')
+ >>> rs._parse_reserved_filters("climate o:'bjoern gruening' middle strings c:'John Says This Fails' psy_maps")
+ (And([Term('repo_owner_username', 'bjoern gruening'), Term('categories', 'John Says This Fails')]), 'climate middle strings psy_maps')
>>> rs._parse_reserved_filters("abyss category:assembly")
(And([Term('categories', 'assembly')]), 'abyss')
>>> rs._parse_reserved_filters("abyss category:assembly greg")
@@ -177,28 +184,19 @@ def _parse_reserved_filters(self, search_term):
>>> rs._parse_reserved_filters("meaningoflife:42")
(None, 'meaningoflife:42')
"""
- allow_query = None
allow_terms = []
- # Split query string on spaces that are not followed by singlequote_space_
- # to allow for quoting filtering values. Also unify double and single quotes into single quotes.
- search_term_chunks = re.split(r"\s+(?!\w+'\s)", search_term.replace('"', "'"), re.MULTILINE)
- reserved_terms = []
- for term_chunk in search_term_chunks:
- if ":" in term_chunk:
- reserved_filter = term_chunk.split(":")[0]
- # Remove the quotes used for delimiting values with space(s)
- reserved_filter_value = term_chunk.split(":")[1].replace("'", "")
- if reserved_filter in RESERVED_SEARCH_TERMS:
- reserved_terms.append(term_chunk)
- if reserved_filter == "category":
- allow_terms.append(Term('categories', reserved_filter_value))
- elif reserved_filter == "owner":
- allow_terms.append(Term('repo_owner_username', reserved_filter_value))
- else:
- pass # Treat unrecognized filter as normal search term.
- if allow_terms:
- allow_query = And(allow_terms)
- search_term_without_filters = " ".join([chunk for chunk in search_term_chunks if chunk not in reserved_terms])
- else:
- search_term_without_filters = search_term
+ search_term_without_filters = None
+ search_space = search_term.replace('"', "'")
+ reserved = re.compile(r"(category|c|owner|o):(\w+|\'.*?\')")
+ while True:
+ match = reserved.search(search_space)
+ if match is None:
+ search_term_without_filters = ' '.join(search_space.split())
+ break
+ if match.groups()[0] in ["category", "c"]:
+ allow_terms.append(Term('categories', match.groups()[1].strip().replace("'", "")))
+ elif match.groups()[0] in ["owner", "o"]:
+ allow_terms.append(Term('repo_owner_username', match.groups()[1].strip().replace("'", "")))
+ search_space = search_space[0:match.start()] + search_space[match.end():]
+ allow_query = And(allow_terms) if allow_terms else None
return allow_query, search_term_without_filters
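
A minimal, standalone sketch of the reserved-filter parsing introduced above (whoosh is not imported here; plain (field, value) tuples stand in for Term objects, and the helper name split_filters is illustrative):

    import re

    # Matches category:/owner: filters and their short forms c:/o:, with optional
    # single-quoted values so multi-word categories and owners can be filtered on.
    RESERVED = re.compile(r"(category|c|owner|o):(\w+|\'.*?\')")

    def split_filters(search_term):
        allow_terms = []
        search_space = search_term.replace('"', "'")
        while True:
            match = RESERVED.search(search_space)
            if match is None:
                # Whatever remains after all filters are cut out is the free-text query.
                return allow_terms, ' '.join(search_space.split())
            field, value = match.groups()
            value = value.strip().replace("'", "")
            if field in ("category", "c"):
                allow_terms.append(("categories", value))
            elif field in ("owner", "o"):
                allow_terms.append(("repo_owner_username", value))
            # Remove the matched filter and keep scanning what is left.
            search_space = search_space[:match.start()] + search_space[match.end():]

    print(split_filters("climate o:'bjoern gruening' c:assembly psy_maps"))
    # ([('repo_owner_username', 'bjoern gruening'), ('categories', 'assembly')], 'climate psy_maps')
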
diff --git a/lib/tool_shed/galaxy_install/install_manager.py b/lib/tool_shed/galaxy_install/install_manager.py
index 20605e8fce1b..ff8f75d32609 100644
--- a/lib/tool_shed/galaxy_install/install_manager.py
+++ b/lib/tool_shed/galaxy_install/install_manager.py
@@ -722,11 +722,11 @@ def __initiate_and_install_repositories(self, tool_shed_url, repository_revision
install_repository_dependencies = install_options.get('install_repository_dependencies', False)
install_resolver_dependencies = install_options.get('install_resolver_dependencies', False)
install_tool_dependencies = install_options.get('install_tool_dependencies', False)
- if install_tool_dependencies:
- self.__assert_can_install_dependencies()
new_tool_panel_section_label = install_options.get('new_tool_panel_section_label', '')
tool_panel_section_mapping = install_options.get('tool_panel_section_mapping', {})
shed_tool_conf = install_options.get('shed_tool_conf', None)
+ if install_tool_dependencies and self.app.tool_dependency_dir is None:
+ raise exceptions.ConfigDoesNotAllowException("Tool dependency installation is disabled in your configuration files.")
if shed_tool_conf:
# Get the tool_path setting.
shed_conf_dict = self.tpm.get_shed_tool_conf_dict(shed_tool_conf)
@@ -992,13 +992,6 @@ def update_tool_shed_repository_status(self, tool_shed_repository, status, error
self.install_model.context.add(tool_shed_repository)
self.install_model.context.flush()
- def __assert_can_install_dependencies(self):
- if self.app.tool_dependency_dir is None:
- no_tool_dependency_dir_message = "Tool dependencies can be automatically installed only if you set "
- no_tool_dependency_dir_message += "the value of your 'tool_dependency_dir' setting in your Galaxy "
- no_tool_dependency_dir_message += "configuration file (galaxy.ini) and restart your Galaxy server. "
- raise exceptions.ConfigDoesNotAllowException(no_tool_dependency_dir_message)
-
class RepositoriesInstalledException(exceptions.RequestParameterInvalidException):
diff --git a/lib/tool_shed/galaxy_install/installed_repository_manager.py b/lib/tool_shed/galaxy_install/installed_repository_manager.py
index c8d8cb05aafc..5365c76323f4 100644
--- a/lib/tool_shed/galaxy_install/installed_repository_manager.py
+++ b/lib/tool_shed/galaxy_install/installed_repository_manager.py
@@ -40,8 +40,6 @@ def __init__(self, app):
self.install_model = self.app.install_model
self.context = self.install_model.context
self.tool_configs = self.app.config.tool_configs
- if self.app.config.migrated_tools_config not in self.tool_configs:
- self.tool_configs.append(self.app.config.migrated_tools_config)
self.tool_trees = []
for tool_config in self.tool_configs:
diff --git a/lib/tool_shed/galaxy_install/tools/data_manager.py b/lib/tool_shed/galaxy_install/tools/data_manager.py
index 279e90b72d63..aa4d0f227f81 100644
--- a/lib/tool_shed/galaxy_install/tools/data_manager.py
+++ b/lib/tool_shed/galaxy_install/tools/data_manager.py
@@ -59,7 +59,7 @@ def install_data_managers(self, shed_data_manager_conf_filename, metadata_dict,
repository_tools_by_guid[tool_tup[1]] = dict(tool_config_filename=tool_tup[0], tool=tool_tup[2])
# Load existing data managers.
try:
- tree, error_message = xml_util.parse_xml(shed_data_manager_conf_filename)
+ tree, error_message = xml_util.parse_xml(shed_data_manager_conf_filename, check_exists=False)
except (OSError, IOError) as exc:
if exc.errno == errno.ENOENT:
with open(shed_data_manager_conf_filename, 'w') as fh:
diff --git a/lib/tool_shed/galaxy_install/tools/tool_panel_manager.py b/lib/tool_shed/galaxy_install/tools/tool_panel_manager.py
index aa94f8386d69..38e1fa593c5d 100644
--- a/lib/tool_shed/galaxy_install/tools/tool_panel_manager.py
+++ b/lib/tool_shed/galaxy_install/tools/tool_panel_manager.py
@@ -32,14 +32,18 @@ def add_to_shed_tool_config(self, shed_tool_conf_dict, elem_list):
shed_tool_conf = shed_tool_conf_dict['config_filename']
tool_path = shed_tool_conf_dict['tool_path']
config_elems = []
+ # Ideally shed_tool_conf.xml would be created before the repo is cloned and added to the DB, but this is called
+ # from too many places to make that feasible at this time.
try:
- tree, error_message = xml_util.parse_xml(shed_tool_conf)
+ tree, error_message = xml_util.parse_xml(shed_tool_conf, check_exists=False)
except (OSError, IOError) as exc:
if (exc.errno == errno.ENOENT and shed_tool_conf_dict.get('create', None) is not None):
+ log.info('Creating shed tool config with default contents: %s', shed_tool_conf)
with open(shed_tool_conf, 'w') as fh:
fh.write(shed_tool_conf_dict['create'])
tree, error_message = xml_util.parse_xml(shed_tool_conf)
else:
+ log.error('Unable to load shed tool config: %s', shed_tool_conf)
raise
if tree:
root = tree.getroot()
@@ -51,6 +55,8 @@ def add_to_shed_tool_config(self, shed_tool_conf_dict, elem_list):
# Persist the altered shed_tool_config file.
self.config_elems_to_xml_file(config_elems, shed_tool_conf, tool_path)
self.app.wait_for_toolbox_reload(old_toolbox)
+ else:
+ log.error(error_message)
def add_to_tool_panel(self, repository_name, repository_clone_url, changeset_revision, repository_tools_tups, owner,
shed_tool_conf, tool_panel_dict, new_install=True, tool_panel_section_mapping={}):
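
The change above follows a parse-or-create pattern: try to read the shed tool config, and only on ENOENT fall back to writing the supplied default contents. A hedged, generic sketch of that pattern (load_or_create_config and its arguments are illustrative, not Galaxy API):

    import errno

    def load_or_create_config(path, default_contents=None):
        try:
            with open(path) as fh:
                return fh.read()
        except (OSError, IOError) as exc:
            # Only a missing file is recoverable, and only when defaults were supplied.
            if exc.errno == errno.ENOENT and default_contents is not None:
                with open(path, 'w') as fh:
                    fh.write(default_contents)
                return default_contents
            raise

    print(load_or_create_config('/tmp/example_shed_tool_conf.xml', '<toolbox tool_path="../shed_tools"/>'))
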
diff --git a/lib/tool_shed/metadata/metadata_generator.py b/lib/tool_shed/metadata/metadata_generator.py
index a37ddac7b0c1..30026ab8c462 100644
--- a/lib/tool_shed/metadata/metadata_generator.py
+++ b/lib/tool_shed/metadata/metadata_generator.py
@@ -359,21 +359,25 @@ def generate_metadata_for_changeset_revision(self):
metadata_dict['sample_files'] = sample_file_metadata_paths
# Copy all sample files included in the repository to a single directory location so we
# can load tools that depend on them.
+ data_table_conf_xml_sample_files = []
for sample_file in sample_file_copy_paths:
tool_util.copy_sample_file(self.app, sample_file, dest_path=work_dir)
# If the list of sample files includes a tool_data_table_conf.xml.sample file, load
# its table elements into memory.
relative_path, filename = os.path.split(sample_file)
if filename == 'tool_data_table_conf.xml.sample':
- # We create a new ToolDataTableManager to avoid adding entries to the app-wide
- # tool data tables. This is only used for checking that the data table is valid.
- new_table_elems, error_message = \
- validation_context.tool_data_tables.add_new_entries_from_config_file(config_filename=sample_file,
- tool_data_path=work_dir,
- shed_tool_data_table_config=work_dir,
- persist=False)
- if error_message:
- self.invalid_file_tups.append((filename, error_message))
+ data_table_conf_xml_sample_files.append(sample_file)
+
+ for data_table_conf_xml_sample_file in data_table_conf_xml_sample_files:
+ # We create a new ToolDataTableManager to avoid adding entries to the app-wide
+ # tool data tables. This is only used for checking that the data table is valid.
+ new_table_elems, error_message = \
+ validation_context.tool_data_tables.add_new_entries_from_config_file(config_filename=data_table_conf_xml_sample_file,
+ tool_data_path=work_dir,
+ shed_tool_data_table_config=work_dir,
+ persist=False)
+ if error_message:
+ self.invalid_file_tups.append((filename, error_message))
for root, dirs, files in os.walk(files_dir):
if root.find('.hg') < 0 and root.find('hgrc') < 0:
if '.hg' in dirs:
diff --git a/lib/tool_shed/util/repository_util.py b/lib/tool_shed/util/repository_util.py
index b51abff9f9c9..3694795b36e2 100644
--- a/lib/tool_shed/util/repository_util.py
+++ b/lib/tool_shed/util/repository_util.py
@@ -1047,7 +1047,7 @@ def validate_repository_name(app, name, user):
if name in ['None', None, '']:
return 'Enter the required repository name.'
if name in ['repos']:
- return "The term %s is a reserved word in the tool shed, so it cannot be used as a repository name." % name
+ return "The term '%s' is a reserved word in the Tool Shed, so it cannot be used as a repository name." % name
check_existing = get_repository_by_name_and_owner(app, name, user.username)
if check_existing is not None:
if check_existing.deleted:
diff --git a/run_tests.sh b/run_tests.sh
index 884015751a95..b95451d8daef 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -312,11 +312,17 @@ then
echo "Docker version:"
docker --version
echo "Launching docker container for testing with extra args ${DOCKER_RUN_EXTRA_ARGS}..."
+ name=$(python -c 'import re; import uuid; print re.sub("-","",str(uuid.uuid4()))')
+ _on_exit() {
+ docker kill $name
+ }
+ trap _on_exit 0
docker $DOCKER_EXTRA_ARGS run $DOCKER_RUN_EXTRA_ARGS \
-e "BUILD_NUMBER=$BUILD_NUMBER" \
-e "GALAXY_TEST_DATABASE_TYPE=$db_type" \
-e "LC_ALL=C" \
--rm \
+ --name=$name \
-v "$(pwd)":/galaxy \
-v "$(pwd)"/test/docker/base/run_test_wrapper.sh:/usr/local/bin/run_test_wrapper.sh "$DOCKER_IMAGE" "$@"
exit $?
diff --git a/scripts/grt/export.py b/scripts/grt/export.py
index 0a8c3f9e2b23..32dc4641ddb9 100644
--- a/scripts/grt/export.py
+++ b/scripts/grt/export.py
@@ -4,6 +4,7 @@
See doc/source/admin/grt.rst for more detailed usage information.
"""
import argparse
+import io
import json
import logging
import os
@@ -19,7 +20,10 @@
import galaxy.app
import galaxy.config
from galaxy.objectstore import build_object_store_from_config
-from galaxy.util import hash_util
+from galaxy.util import (
+ hash_util,
+ unicodify
+)
from galaxy.util.script import app_properties_from_args, populate_config_args
sample_config = os.path.abspath(os.path.join(os.path.dirname(__file__), 'grt.yml.sample'))
@@ -142,7 +146,7 @@ def annotate(label, human_label=None):
blacklisted_tools = config['sanitization']['tools']
annotate('export_jobs_start', 'Exporting Jobs')
- handle_job = open(REPORT_BASE + '.jobs.tsv', 'w')
+ handle_job = io.open(REPORT_BASE + '.jobs.tsv', 'w', encoding='utf-8')
handle_job.write('\t'.join(('id', 'tool_id', 'tool_version', 'state', 'create_time')) + '\n')
for offset_start in range(last_job_sent, end_job_id, args.batch_size):
logging.debug("Processing %s:%s", offset_start, min(end_job_id, offset_start + args.batch_size))
@@ -155,16 +159,15 @@ def annotate(label, human_label=None):
continue
try:
- handle_job.write(str(job[0])) # id
- handle_job.write('\t')
- handle_job.write(job[2]) # tool_id
- handle_job.write('\t')
- handle_job.write(job[3]) # tool_version
- handle_job.write('\t')
- handle_job.write(job[4]) # state
- handle_job.write('\t')
- handle_job.write(str(job[5])) # create_time
- handle_job.write('\n')
+ line = [
+ str(job[0]), # id
+ job[2], # tool_id
+ job[3], # tool_version
+ job[4], # state
+ str(job[5]) # create_time
+ ]
+ cline = unicodify('\t'.join(line) + '\n')
+ handle_job.write(cline)
except Exception:
logging.warning("Unable to write out a 'handle_job' row. Ignoring the row.", exc_info=True)
continue
@@ -177,7 +180,7 @@ def annotate(label, human_label=None):
annotate('export_jobs_end')
annotate('export_datasets_start', 'Exporting Datasets')
- handle_datasets = open(REPORT_BASE + '.datasets.tsv', 'w')
+ handle_datasets = io.open(REPORT_BASE + '.datasets.tsv', 'w', encoding='utf-8')
handle_datasets.write('\t'.join(('job_id', 'dataset_id', 'extension', 'file_size', 'param_name', 'type')) + '\n')
for offset_start in range(last_job_sent, end_job_id, args.batch_size):
logging.debug("Processing %s:%s", offset_start, min(end_job_id, offset_start + args.batch_size))
@@ -241,18 +244,16 @@ def annotate(label, human_label=None):
continue
try:
- handle_datasets.write(str(job[0]))
- handle_datasets.write('\t')
- handle_datasets.write(str(hda_id))
- handle_datasets.write('\t')
- handle_datasets.write(str(hdas[hda_id][1]))
- handle_datasets.write('\t')
- handle_datasets.write(round_to_2sd(datasets[dataset_id][0]))
- handle_datasets.write('\t')
- handle_datasets.write(str(job[2]))
- handle_datasets.write('\t')
- handle_datasets.write(str(filetype))
- handle_datasets.write('\n')
+ line = [
+ str(job[0]), # Job ID
+ str(hda_id), # HDA ID
+ str(hdas[hda_id][1]), # Extension
+ round_to_2sd(datasets[dataset_id][0]), # File size
+ job[2], # Parameter name
+ str(filetype) # input/output
+ ]
+ cline = unicodify('\t'.join(line) + '\n')
+ handle_datasets.write(cline)
except Exception:
logging.warning("Unable to write out a 'handle_datasets' row. Ignoring the row.", exc_info=True)
continue
@@ -260,7 +261,7 @@ def annotate(label, human_label=None):
annotate('export_datasets_end')
annotate('export_metric_num_start', 'Exporting Metrics (Numeric)')
- handle_metric_num = open(REPORT_BASE + '.metric_num.tsv', 'w')
+ handle_metric_num = io.open(REPORT_BASE + '.metric_num.tsv', 'w', encoding='utf-8')
handle_metric_num.write('\t'.join(('job_id', 'plugin', 'name', 'value')) + '\n')
for offset_start in range(last_job_sent, end_job_id, args.batch_size):
logging.debug("Processing %s:%s", offset_start, min(end_job_id, offset_start + args.batch_size))
@@ -276,14 +277,15 @@ def annotate(label, human_label=None):
continue
try:
- handle_metric_num.write(str(metric[0]))
- handle_metric_num.write('\t')
- handle_metric_num.write(metric[1])
- handle_metric_num.write('\t')
- handle_metric_num.write(metric[2])
- handle_metric_num.write('\t')
- handle_metric_num.write(str(metric[3]))
- handle_metric_num.write('\n')
+ line = [
+ str(metric[0]), # job id
+ metric[1], # plugin
+ metric[2], # name
+ str(metric[3]) # value
+ ]
+
+ cline = unicodify('\t'.join(line) + '\n')
+ handle_metric_num.write(cline)
except Exception:
logging.warning("Unable to write out a 'handle_metric_num' row. Ignoring the row.", exc_info=True)
continue
@@ -303,7 +305,7 @@ def annotate(label, human_label=None):
os.unlink(REPORT_BASE + '.' + name + '.tsv')
_times.append(('job_finish', time.time() - _start_time))
- sha = hash_util.memory_bound_hexdigest(hash_util.sha256, REPORT_BASE + ".tar.gz")
+ sha = hash_util.memory_bound_hexdigest(hash_func=hash_util.sha256, path=REPORT_BASE + ".tar.gz")
_times.append(('hash_finish', time.time() - _start_time))
# Now serialize the individual report data.
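
A small sketch of the UTF-8 TSV writing pattern the export script now uses: open the handle with an explicit encoding via io.open and write whole unicode lines. The local to_unicode() below is a stand-in for galaxy.util.unicodify, and the file name and row values are illustrative:

    import io

    def to_unicode(value, encoding='utf-8'):
        # Stand-in for galaxy.util.unicodify: decode bytes, stringify everything else.
        if isinstance(value, bytes):
            return value.decode(encoding, 'replace')
        return u'%s' % value

    row = [101, 'Grep1', '1.0.1', 'ok', '2019-01-01 12:00:00']
    with io.open('jobs.tsv', 'w', encoding='utf-8') as handle_job:
        handle_job.write(to_unicode('\t'.join(str(field) for field in row) + '\n'))
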
diff --git a/scripts/microbes/README.txt b/scripts/microbes/README.txt
index 7904f20d09d6..1f1abecad269 100644
--- a/scripts/microbes/README.txt
+++ b/scripts/microbes/README.txt
@@ -1,24 +1,24 @@
-The Scripts found in this directory are used to download and generate the files relating to Galaxy as a Microbial Resource.
-This includes scripts to access the NCBI Genome Projects site, download relevent data, and convert to a form usable in Galaxy. Data is generated for the Microbial Datasource Tool, as well as for Galaxy Interval Operations (chromosome names and lengths), and also the extract Genomic DNA tool. Information about organisms is also written into '.info' files found in each organism directory.
-
-Step 3 requires a binary 'faToNib' to properly generate sequence files.
-
-Steps should be performed in the order they appear here.
-
-(1.) To Download and process Genome Projects from NCBI into a form usable by Galaxy:
- python /GALAXY_ROOT/scripts/microbes/harvest_bacteria.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/harvest.txt
-
-(2.) To Walk downloaded Genome Projects and Convert, in place, IDs to match the UCSC Archaea browser, where applicable:
- python /GALAXY_ROOT/scripts/microbes/ncbi_to_ucsc.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/ncbi_to_ucsc.txt
-
-(3.) To create nib files (for extraction) and to generate the location file content for Microbes used for extracting Genomic DNA:
- python /GALAXY_ROOT/scripts/microbes/create_nib_seq_loc_file.py /OUTPUT/DIRECTORY/microbes/ seq.loc > /OUTPUT/DIRECTORY/sequence.txt
-
-(4.) To create the location file for the Microbial Data Resource tool in Galaxy:
- python /GALAXY_ROOT/scripts/microbes/create_bacteria_loc_file.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/microbial_data.loc
-
-(5.) To Generate a single file containing the lengths for each chromosome for each species, to be added to 'manual_builds.txt':
- python /GALAXY_ROOT/scripts/microbes/get_builds_lengths.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/microbes.len
-
-(6.) To Create the Wiki Table listing available Microbial Data in Galaxy:
+The Scripts found in this directory are used to download and generate the files relating to Galaxy as a Microbial Resource.
+This includes scripts to access the NCBI Genome Projects site, download relevant data, and convert to a form usable in Galaxy. Data is generated for the Microbial Datasource Tool, as well as for Galaxy Interval Operations (chromosome names and lengths), and also the extract Genomic DNA tool. Information about organisms is also written into '.info' files found in each organism directory.
+
+Step 3 requires a binary 'faToNib' to properly generate sequence files.
+
+Steps should be performed in the order they appear here.
+
+(1.) To Download and process Genome Projects from NCBI into a form usable by Galaxy:
+ python /GALAXY_ROOT/scripts/microbes/harvest_bacteria.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/harvest.txt
+
+(2.) To Walk downloaded Genome Projects and Convert, in place, IDs to match the UCSC Archaea browser, where applicable:
+ python /GALAXY_ROOT/scripts/microbes/ncbi_to_ucsc.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/ncbi_to_ucsc.txt
+
+(3.) To create nib files (for extraction) and to generate the location file content for Microbes used for extracting Genomic DNA:
+ python /GALAXY_ROOT/scripts/microbes/create_nib_seq_loc_file.py /OUTPUT/DIRECTORY/microbes/ seq.loc > /OUTPUT/DIRECTORY/sequence.txt
+
+(4.) To create the location file for the Microbial Data Resource tool in Galaxy:
+ python /GALAXY_ROOT/scripts/microbes/create_bacteria_loc_file.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/microbial_data.loc
+
+(5.) To Generate a single file containing the lengths for each chromosome for each species, to be added to 'manual_builds.txt':
+ python /GALAXY_ROOT/scripts/microbes/get_builds_lengths.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/microbes.len
+
+(6.) To Create the Wiki Table listing available Microbial Data in Galaxy:
python /GALAXY_ROOT/scripts/microbes/create_bacteria_table.py /OUTPUT/DIRECTORY/microbes/ > /OUTPUT/DIRECTORY/microbes.table
\ No newline at end of file
diff --git a/scripts/tool_shed/build_ts_whoosh_index.py b/scripts/tool_shed/build_ts_whoosh_index.py
index 591d56b5e395..3dc25714e799 100644
--- a/scripts/tool_shed/build_ts_whoosh_index.py
+++ b/scripts/tool_shed/build_ts_whoosh_index.py
@@ -130,7 +130,7 @@ def get_repos(sa_session, path_to_repositories, hgweb_config_dir):
category_names = []
for rca in sa_session.query(model.RepositoryCategoryAssociation).filter(model.RepositoryCategoryAssociation.repository_id == repo.id):
for category in sa_session.query(model.Category).filter(model.Category.id == rca.category.id):
- category_names.append(category.name)
+ category_names.append(category.name.lower())
categories = (",").join(category_names)
repo_id = repo.id
name = repo.name
@@ -146,7 +146,7 @@ def get_repos(sa_session, path_to_repositories, hgweb_config_dir):
repo_owner_username = ''
if repo.user_id is not None:
user = sa_session.query(model.User).filter(model.User.id == repo.user_id).one()
- repo_owner_username = user.username
+ repo_owner_username = user.username.lower()
approved = 'no'
for review in repo.reviews:
diff --git a/test/api/test_dataset_collections.py b/test/api/test_dataset_collections.py
index 946114e51af1..a719af89ac12 100644
--- a/test/api/test_dataset_collections.py
+++ b/test/api/test_dataset_collections.py
@@ -2,7 +2,7 @@
import tarfile
from base import api
-from base.populators import DatasetCollectionPopulator, DatasetPopulator
+from base.populators import DatasetCollectionPopulator, DatasetPopulator, skip_if_github_down
from six import BytesIO
@@ -233,6 +233,7 @@ def test_upload_nested(self):
element0 = hdca["elements"][0]
assert element0["element_identifier"] == "samp1"
+ @skip_if_github_down
def test_upload_collection_from_url(self):
elements = [{"src": "url", "url": "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/4.bed", "info": "my cool bed"}]
targets = [{
diff --git a/test/api/test_tools.py b/test/api/test_tools.py
index 43c050e6d3e0..a4c4bb817f83 100644
--- a/test/api/test_tools.py
+++ b/test/api/test_tools.py
@@ -66,6 +66,15 @@ def test_no_panel_index(self):
tool_ids = [_["id"] for _ in tools_index]
assert "upload1" in tool_ids
+ @skip_without_tool("test_sam_to_bam_conversions")
+ def test_requirements(self):
+ requirements_response = self._get("tools/%s/requirements" % "test_sam_to_bam_conversions", admin=True)
+ self._assert_status_code_is_ok(requirements_response)
+ requirements = requirements_response.json()
+ assert len(requirements) == 1, requirements
+ requirement = requirements[0]
+ assert requirement['name'] == 'samtools', requirement
+
@skip_without_tool("cat1")
def test_show_repeat(self):
tool_info = self._show_valid_tool("cat1")
diff --git a/test/api/test_tools_upload.py b/test/api/test_tools_upload.py
index 9366f219e79e..a6cde5cb9ae8 100644
--- a/test/api/test_tools_upload.py
+++ b/test/api/test_tools_upload.py
@@ -8,6 +8,7 @@
)
from base.populators import (
DatasetPopulator,
+ skip_if_site_down,
skip_without_datatype,
uses_test_history,
)
@@ -484,11 +485,13 @@ def test_upload_force_composite(self):
assert extra_file["path"] == "composite"
assert extra_file["class"] == "File"
+ @skip_if_site_down("https://usegalaxy.org")
def test_upload_from_invalid_url(self):
history_id, new_dataset = self._upload('https://usegalaxy.org/bla123', assert_ok=False)
dataset_details = self.dataset_populator.get_history_dataset_details(history_id, dataset_id=new_dataset["id"], assert_ok=False)
assert dataset_details['state'] == 'error', "expected dataset state to be 'error', but got '%s'" % dataset_details['state']
+ @skip_if_site_down("https://usegalaxy.org")
def test_upload_from_valid_url(self):
history_id, new_dataset = self._upload('https://usegalaxy.org/api/version')
self.dataset_populator.get_history_dataset_details(history_id, dataset_id=new_dataset["id"], assert_ok=True)
diff --git a/test/api/test_workflows.py b/test/api/test_workflows.py
index e1a063278e8a..f0ac9a5017c4 100644
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -1527,6 +1527,30 @@ def test_workflow_run_input_mapping_with_output_collections(self):
elements0 = elements[0]
assert elements0["element_identifier"] == "el1"
+ self.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
+
+ jobs_summary_response = self._get("workflows/%s/invocations/%s/jobs_summary" % (workflow_id, invocation_id))
+ self._assert_status_code_is(jobs_summary_response, 200)
+ jobs_summary = jobs_summary_response.json()
+ assert 'states' in jobs_summary
+
+ invocation_states = jobs_summary['states']
+ assert invocation_states and 'ok' in invocation_states, jobs_summary
+ assert invocation_states['ok'] == 2, jobs_summary
+ assert jobs_summary['model'] == 'WorkflowInvocation', jobs_summary
+
+ jobs_summary_response = self._get("workflows/%s/invocations/%s/step_jobs_summary" % (workflow_id, invocation_id))
+ self._assert_status_code_is(jobs_summary_response, 200)
+ jobs_summary = jobs_summary_response.json()
+ assert len(jobs_summary) == 1
+ collection_summary = jobs_summary[0]
+ assert 'states' in collection_summary
+
+ collection_states = collection_summary['states']
+ assert collection_states and 'ok' in collection_states, collection_states
+ assert collection_states['ok'] == 2, collection_summary
+ assert collection_summary['model'] == 'ImplicitCollectionJobs', collection_summary
+
def test_workflow_run_input_mapping_with_subworkflows(self):
with self.dataset_populator.test_history() as history_id:
test_data = """
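
For reference, the two invocation summary endpoints exercised above can also be queried directly; a rough sketch with the requests library, assuming a running Galaxy, and with the base URL, API key, and IDs as placeholders:

    import requests

    base_url = "http://localhost:8080/api"      # placeholder Galaxy instance
    params = {"key": "<api key>"}               # placeholder API key
    workflow_id, invocation_id = "<workflow id>", "<invocation id>"

    summary = requests.get(
        "%s/workflows/%s/invocations/%s/jobs_summary" % (base_url, workflow_id, invocation_id),
        params=params,
    ).json()
    print(summary["model"], summary["states"])   # e.g. WorkflowInvocation {'ok': 2}

    per_step = requests.get(
        "%s/workflows/%s/invocations/%s/step_jobs_summary" % (base_url, workflow_id, invocation_id),
        params=params,
    ).json()
    for step_summary in per_step:
        print(step_summary["model"], step_summary["states"])
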
diff --git a/test/base/driver_util.py b/test/base/driver_util.py
index e168e8a77914..ee8e319f32ab 100644
--- a/test/base/driver_util.py
+++ b/test/base/driver_util.py
@@ -155,7 +155,6 @@ def setup_galaxy_config(
tmpdir = os.path.realpath(tmpdir)
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
- file_path = os.path.join(tmpdir, 'files')
template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
@@ -175,9 +174,6 @@ def setup_galaxy_config(
library_import_dir = None
job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
- tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR', None)
- if tool_dependency_dir is None:
- tool_dependency_dir = tempfile.mkdtemp(dir=tmpdir, prefix="tool_dependencies")
tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
default_data_manager_config = None
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
@@ -221,10 +217,10 @@ def setup_galaxy_config(
conda_auto_install=conda_auto_install,
cleanup_job=cleanup_job,
retry_metadata_internally=False,
+ data_dir=tmpdir,
data_manager_config_file=data_manager_config_file,
enable_beta_tool_formats=True,
expose_dataset_path=True,
- file_path=file_path,
ftp_upload_purge=False,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
id_secret='changethisinproductiontoo',
@@ -292,11 +288,12 @@ def setup_galaxy_config(
if enable_tool_shed_check:
config["enable_tool_shed_check"] = enable_tool_shed_check
config["hours_between_check"] = 0.001
+ tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR')
if tool_dependency_dir:
config["tool_dependency_dir"] = tool_dependency_dir
- # Used by shed's twill dependency stuff - todo read from
- # Galaxy's config API.
- os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
+ # Used by shed's twill dependency stuff
+ # TODO: read from Galaxy's config API.
+ os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies')
return config
diff --git a/test/base/populators.py b/test/base/populators.py
index bc4be2239026..88fb63530f63 100644
--- a/test/base/populators.py
+++ b/test/base/populators.py
@@ -106,6 +106,30 @@ def wrapped_method(api_test_case, *args, **kwargs):
return method_wrapper
+def skip_if_site_down(url):
+
+ def site_down():
+ try:
+ response = requests.get(url)
+ return response.status_code != 200
+ except Exception:
+ return False
+
+ def method_wrapper(method):
+ @wraps(method)
+ def wrapped_method(api_test_case, *args, **kwargs):
+ _raise_skip_if(site_down(), "Test depends on [%s] being up and it appears to be down." % url)
+ method(api_test_case, *args, **kwargs)
+
+ return wrapped_method
+
+ return method_wrapper
+
+
+skip_if_toolshed_down = skip_if_site_down("https://toolshed.g2.bx.psu.edu")
+skip_if_github_down = skip_if_site_down("https://github.com/")
+
+
def summarize_instance_history_on_error(method):
@wraps(method)
def wrapped_method(api_test_case, *args, **kwds):
@@ -134,10 +158,10 @@ def wrapped_method(api_test_case, *args, **kwds):
return method_wrapper
-def _raise_skip_if(check):
+def _raise_skip_if(check, *args):
if check:
from nose.plugins.skip import SkipTest
- raise SkipTest()
+ raise SkipTest(*args)
# Deprecated mixin, use dataset populator instead.
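
Because skip_if_site_down is a decorator factory, new external dependencies can be guarded the same way the toolshed and GitHub ones are above. A hypothetical usage sketch (the Zenodo URL and test names are illustrative):

    from base import api
    from base.populators import skip_if_site_down

    skip_if_zenodo_down = skip_if_site_down("https://zenodo.org")


    class ExampleUploadTestCase(api.ApiTestCase):  # hypothetical test case

        @skip_if_zenodo_down
        def test_upload_from_zenodo_url(self):
            # Would upload from a zenodo.org URL here; skipped when the site is unreachable.
            pass
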
diff --git a/test/functional/tools/catDocker.xml b/test/functional/tools/catDocker.xml
index 1437ebb717b1..470fef9ccf09 100644
--- a/test/functional/tools/catDocker.xml
+++ b/test/functional/tools/catDocker.xml
@@ -1,28 +1,28 @@
-
- tail-to-head
-
- busybox:ubuntu-14.04
-
-
- echo "Galaxy slots passed through contain as \$GALAXY_SLOTS";
- cat $input1
- #for $q in $queries
- ${q.input2}
- #end for
- > $out_file1;
- echo "Work dir output" > working_file
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+ tail-to-head
+
+ busybox:ubuntu-14.04
+
+
+ echo "Galaxy slots passed through contain as \$GALAXY_SLOTS";
+ cat $input1
+ #for $q in $queries
+ ${q.input2}
+ #end for
+ > $out_file1;
+ echo "Work dir output" > working_file
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/functional/tools/for_workflows/cat.xml b/test/functional/tools/for_workflows/cat.xml
index 19016e35d77e..850469141fdc 100644
--- a/test/functional/tools/for_workflows/cat.xml
+++ b/test/functional/tools/for_workflows/cat.xml
@@ -1,19 +1,19 @@
-
- tail-to-head
-
- cat $input1 #for $q in $queries# ${q.input2} #end for# > $out_file1
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+ tail-to-head
+
+ cat $input1 #for $q in $queries# ${q.input2} #end for# > $out_file1
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/integration/embedded_pulsar_docker_job_conf.yml b/test/integration/embedded_pulsar_docker_job_conf.yml
new file mode 100644
index 000000000000..59c29a56af4c
--- /dev/null
+++ b/test/integration/embedded_pulsar_docker_job_conf.yml
@@ -0,0 +1,20 @@
+runners:
+ local:
+ load: galaxy.jobs.runners.local:LocalJobRunner
+ pulsar_embed:
+ load: galaxy.jobs.runners.pulsar:PulsarEmbeddedJobRunner
+
+execution:
+ default: pulsar_embed
+ environments:
+ local:
+ runner: local
+ pulsar_embed:
+ runner: pulsar_embed
+ docker_enabled: true
+ docker_sudo: false
+ docker_required: true
+
+tools:
+- id: upload1
+ environment: local
diff --git a/test/integration/embedded_pulsar_singularity_job_conf.yml b/test/integration/embedded_pulsar_singularity_job_conf.yml
new file mode 100644
index 000000000000..cdcdedc19ffc
--- /dev/null
+++ b/test/integration/embedded_pulsar_singularity_job_conf.yml
@@ -0,0 +1,19 @@
+runners:
+ local:
+ load: galaxy.jobs.runners.local:LocalJobRunner
+ pulsar_embed:
+ load: galaxy.jobs.runners.pulsar:PulsarEmbeddedJobRunner
+
+execution:
+ default: pulsar_embed
+ environments:
+ local:
+ runner: local
+ pulsar_embed:
+ runner: pulsar_embed
+ singularity_enabled: true
+ singularity_required: true
+
+tools:
+- id: upload1
+ environment: local
diff --git a/test/integration/test_containerized_jobs.py b/test/integration/test_containerized_jobs.py
index be16ba91ce0e..8ae6dcfd71b8 100644
--- a/test/integration/test_containerized_jobs.py
+++ b/test/integration/test_containerized_jobs.py
@@ -35,11 +35,22 @@ class ContainerizedIntegrationTestCase(integration_util.IntegrationTestCase):
@classmethod
def setUpClass(cls):
- if not which(cls.container_type):
- raise unittest.SkipTest("Executable '%s' not found on PATH" % cls.container_type)
+ skip_if_container_type_unavailable(cls)
super(ContainerizedIntegrationTestCase, cls).setUpClass()
+def disable_dependency_resolution(config):
+ # Disable tool dependency resolution.
+ config["tool_dependency_dir"] = "none"
+ config["conda_auto_init"] = False
+ config["conda_auto_install"] = False
+
+
+def skip_if_container_type_unavailable(cls):
+ if not which(cls.container_type):
+ raise unittest.SkipTest("Executable '%s' not found on PATH" % cls.container_type)
+
+
class DockerizedJobsIntegrationTestCase(integration_util.IntegrationTestCase, RunsEnvironmentJobs, MulledJobTestCases):
framework_tool_and_types = True
@@ -53,15 +64,11 @@ def handle_galaxy_config_kwds(cls, config):
cls.jobs_directory = cls._test_driver.mkdtemp()
config["jobs_directory"] = cls.jobs_directory
config["job_config_file"] = cls.job_config_file
- # Disable tool dependency resolution.
- config["tool_dependency_dir"] = "none"
- config["conda_auto_init"] = False
- config["conda_auto_install"] = False
+ disable_dependency_resolution(config)
@classmethod
def setUpClass(cls):
- if not which(cls.container_type):
- raise unittest.SkipTest("Executable '%s' not found on PATH" % cls.container_type)
+ skip_if_container_type_unavailable(cls)
super(DockerizedJobsIntegrationTestCase, cls).setUpClass()
def setUp(self):
@@ -109,6 +116,10 @@ def test_build_mulled(self):
assert any([True for d in response if d['dependency_type'] == self.container_type])
+# Singularity 2.4 in the official Vagrant image has some problems running this test
+# case by default because subdirectories of /tmp don't bind correctly. Overriding
+# TMPDIR can fix this, e.g.:
+# TMPDIR=/home/vagrant/tmp/ pytest test/integration/test_containerized_jobs.py::SingularityJobsIntegrationTestCase
class SingularityJobsIntegrationTestCase(DockerizedJobsIntegrationTestCase):
job_config_file = SINGULARITY_JOB_CONFIG_FILE
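
The two helpers factored out above make it easy to add further container-backed integration cases; the new embedded Pulsar container tests later in this diff follow exactly this shape. A condensed, hypothetical sketch (the class name and job config path are illustrative):

    from base import integration_util
    from .test_containerized_jobs import (
        disable_dependency_resolution,
        skip_if_container_type_unavailable,
    )


    class ExampleDockerIntegrationTestCase(integration_util.IntegrationTestCase):
        framework_tool_and_types = True
        container_type = 'docker'
        job_config_file = '/path/to/docker_job_conf.yml'  # illustrative

        @classmethod
        def handle_galaxy_config_kwds(cls, config):
            config["job_config_file"] = cls.job_config_file
            disable_dependency_resolution(config)

        @classmethod
        def setUpClass(cls):
            skip_if_container_type_unavailable(cls)
            super(ExampleDockerIntegrationTestCase, cls).setUpClass()
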
diff --git a/test/integration/test_data_manager_table_reload.py b/test/integration/test_data_manager_table_reload.py
index f63a503cfd08..3b8e32596aa4 100644
--- a/test/integration/test_data_manager_table_reload.py
+++ b/test/integration/test_data_manager_table_reload.py
@@ -2,7 +2,7 @@
import string
from base import integration_util
-from base.populators import DatasetPopulator
+from base.populators import DatasetPopulator, skip_if_toolshed_down
from nose.plugins.skip import SkipTest
from .uses_shed import CONDA_AUTO_INSTALL_JOB_TIMEOUT, UsesShed
@@ -43,6 +43,7 @@ def handle_galaxy_config_kwds(cls, config):
cls.username = cls.get_secure_ascii_digits()
config["admin_users"] = "%s@galaxy.org" % cls.username
+ @skip_if_toolshed_down
def test_data_manager_installation_table_reload(self):
"""
Test that we can install data managers, create a new dbkey, and use that dbkey in a downstream data manager.
diff --git a/test/integration/test_pulsar_embedded.py b/test/integration/test_pulsar_embedded.py
index eaf685d09712..9d1077081706 100644
--- a/test/integration/test_pulsar_embedded.py
+++ b/test/integration/test_pulsar_embedded.py
@@ -20,4 +20,9 @@ def handle_galaxy_config_kwds(cls, config):
instance = integration_util.integration_module_instance(EmbeddedPulsarIntegrationInstance)
-test_tools = integration_util.integration_tool_runner(["simple_constructs", "multi_data_param", "output_filter"])
+test_tools = integration_util.integration_tool_runner([
+ "simple_constructs",
+ "multi_data_param",
+ "output_filter",
+ "vcf_bgzip_test",
+])
diff --git a/test/integration/test_pulsar_embedded_containers.py b/test/integration/test_pulsar_embedded_containers.py
new file mode 100644
index 000000000000..25bebabd9526
--- /dev/null
+++ b/test/integration/test_pulsar_embedded_containers.py
@@ -0,0 +1,48 @@
+import os
+
+from base import integration_util # noqa: I100,I202
+from base.populators import (
+ DatasetPopulator,
+)
+from .test_containerized_jobs import ( # noqa: I201
+ disable_dependency_resolution,
+ MulledJobTestCases,
+ skip_if_container_type_unavailable,
+)
+
+SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
+EMBEDDED_PULSAR_JOB_CONFIG_FILE_SINGULARITY = os.path.join(SCRIPT_DIRECTORY, "embedded_pulsar_singularity_job_conf.yml")
+EMBEDDED_PULSAR_JOB_CONFIG_FILE_DOCKER = os.path.join(SCRIPT_DIRECTORY, "embedded_pulsar_docker_job_conf.yml")
+
+
+class BaseEmbeddedPulsarContainerIntegrationTestCase(integration_util.IntegrationTestCase):
+ framework_tool_and_types = True
+
+ @classmethod
+ def handle_galaxy_config_kwds(cls, config):
+ cls.jobs_directory = cls._test_driver.mkdtemp()
+ config["jobs_directory"] = cls.jobs_directory
+ config["job_config_file"] = cls.job_config_file
+ disable_dependency_resolution(config)
+
+ def setUp(self):
+ super(BaseEmbeddedPulsarContainerIntegrationTestCase, self).setUp()
+ self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
+ self.history_id = self.dataset_populator.new_history()
+
+ @classmethod
+ def setUpClass(cls):
+ skip_if_container_type_unavailable(cls)
+ super(BaseEmbeddedPulsarContainerIntegrationTestCase, cls).setUpClass()
+
+
+class EmbeddedSingularityPulsarIntegrationTestCase(BaseEmbeddedPulsarContainerIntegrationTestCase, MulledJobTestCases):
+ # singularity passes $HOME by default
+ default_container_home_dir = os.environ.get('HOME', '/')
+ job_config_file = EMBEDDED_PULSAR_JOB_CONFIG_FILE_SINGULARITY
+ container_type = 'singularity'
+
+
+class EmbeddedDockerPulsarIntegrationTestCase(BaseEmbeddedPulsarContainerIntegrationTestCase, MulledJobTestCases):
+ job_config_file = EMBEDDED_PULSAR_JOB_CONFIG_FILE_DOCKER
+ container_type = 'docker'
diff --git a/test/integration/test_shed_tool_tests.py b/test/integration/test_shed_tool_tests.py
index 1ed0015e9cb0..66d84bde41cd 100644
--- a/test/integration/test_shed_tool_tests.py
+++ b/test/integration/test_shed_tool_tests.py
@@ -1,4 +1,5 @@
from base import integration_util
+from base.populators import skip_if_toolshed_down
from .uses_shed import UsesShed
@@ -13,6 +14,7 @@ class ToolShedToolTestIntegrationTestCase(integration_util.IntegrationTestCase,
def handle_galaxy_config_kwds(cls, config):
cls.configure_shed_and_conda(config)
+ @skip_if_toolshed_down
def test_tool_test(self):
self.install_repository("devteam", "fastqc", "ff9530579d1f")
self._run_tool_test("toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.71")
diff --git a/test/selenium_tests/framework.py b/test/selenium_tests/framework.py
index 3ad89b820c5c..0c4fb2a320aa 100644
--- a/test/selenium_tests/framework.py
+++ b/test/selenium_tests/framework.py
@@ -223,6 +223,8 @@ def setup_with_driver(self):
Overriding this instead of setUp will ensure debug data such as screenshots and stack traces
are dumped if there are problems with the setup and it will be re-ran on test retries.
"""
+ if self.ensure_registered:
+ self.login()
def tearDown(self):
exception = None
@@ -298,9 +300,6 @@ def setup_driver_and_session(self):
self._setup_galaxy_logging()
- if self.ensure_registered:
- self.login()
-
def _setup_galaxy_logging(self):
self.home()
self.driver.execute_script(SETUP_LOGGING_JS)
diff --git a/test/selenium_tests/test_admin_app.py b/test/selenium_tests/test_admin_app.py
index 205458a9ca97..746b4a4bc5bb 100644
--- a/test/selenium_tests/test_admin_app.py
+++ b/test/selenium_tests/test_admin_app.py
@@ -82,7 +82,6 @@ def test_admin_data_manager(self):
job_id = run_response.json()["jobs"][0]["id"]
self.dataset_populator.wait_for_tool_run(history_id=history_id,
run_response=run_response,
- timeout=5,
assert_ok=False)
admin_component.dm_jobs_button(data_manager="test-data-manager").wait_for_and_click()
diff --git a/test/selenium_tests/test_library_landing.py b/test/selenium_tests/test_library_landing.py
index 001e227030eb..39d1d14eb055 100644
--- a/test/selenium_tests/test_library_landing.py
+++ b/test/selenium_tests/test_library_landing.py
@@ -10,6 +10,7 @@ class LibraryLandingTestCase(SeleniumTestCase):
requires_admin = True
def setup_with_driver(self):
+ super(LibraryLandingTestCase, self).setup_with_driver()
self.admin_login()
self.libraries_open()
diff --git a/test/selenium_tests/test_tool_form.py b/test/selenium_tests/test_tool_form.py
index f8aef81abf28..4ba932dcc69c 100644
--- a/test/selenium_tests/test_tool_form.py
+++ b/test/selenium_tests/test_tool_form.py
@@ -1,7 +1,7 @@
import json
from base import rules_test_data
-from base.populators import flakey, load_data_dict
+from base.populators import flakey, load_data_dict, skip_if_github_down
from galaxy.selenium.navigates_galaxy import retry_call_during_transitions
from .framework import (
@@ -185,6 +185,7 @@ def test_run_apply_rules_4(self):
@selenium_test
@managed_history
+ @skip_if_github_down
def test_run_apply_rules_tutorial(self):
self.home()
self.upload_rule_start()
diff --git a/test/selenium_tests/test_workflow_management.py b/test/selenium_tests/test_workflow_management.py
index 3f7d614880bd..2c57da031651 100644
--- a/test/selenium_tests/test_workflow_management.py
+++ b/test/selenium_tests/test_workflow_management.py
@@ -1,3 +1,5 @@
+from base.populators import skip_if_github_down
+
from .framework import (
retry_assertion_during_transitions,
selenium_test,
@@ -17,7 +19,7 @@ def test_import_from_url(self):
table_elements = self.workflow_index_table_elements()
assert len(table_elements) == 1
- new_workflow = table_elements[0].find_element_by_css_selector("a.btn.btn-secondary")
+ new_workflow = table_elements[0].find_element_by_css_selector(".workflow-dropdown")
assert 'TestWorkflow1 (imported from uploaded file)' in new_workflow.text, new_workflow.text
@selenium_test
@@ -83,31 +85,11 @@ def test_index_search(self):
self.workflow_index_search_for("searchforthis")
self._assert_showing_n_workflows(1)
- @selenium_test
- def test_publishing_display(self):
- self.workflow_index_open()
- self._workflow_import_from_url()
- self.workflow_index_rename("managementesttopublish")
-
- published_column_index = 4
-
- @retry_assertion_during_transitions
- def assert_published_column_text_is(expected_text):
- column_text = self.workflow_index_column_text(published_column_index)
- self.assertEqual(expected_text, column_text)
-
- assert_published_column_text_is("No")
- self.workflow_index_click_option("Share")
- self.workflow_sharing_click_publish()
-
- self.workflow_index_open()
- assert_published_column_text_is("Yes")
- self.screenshot("workflow_manage_published")
-
@retry_assertion_during_transitions
def _assert_showing_n_workflows(self, n):
self.assertEqual(len(self.workflow_index_table_elements()), n)
+ @skip_if_github_down
def _workflow_import_from_url(self):
self.workflow_index_click_import()
url = "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test/base/data/test_workflow_1.ga"
diff --git a/test/selenium_tests/test_workflow_run.py b/test/selenium_tests/test_workflow_run.py
index c69d0017abaa..f276477d57a8 100644
--- a/test/selenium_tests/test_workflow_run.py
+++ b/test/selenium_tests/test_workflow_run.py
@@ -223,7 +223,7 @@ def workflow_run_specify_inputs(self, inputs):
def workflow_run_with_name(self, name):
self.workflow_index_open()
self.workflow_index_search_for(name)
- self.workflow_index_click_option("Run")
+ self.workflow_click_option(".workflow-run")
def _assert_has_3_lines_after_run(self, hid):
self.history_panel_wait_for_hid_ok(hid, allowed_force_refreshes=1)
diff --git a/test/shed_functional/base/twilltestcase.py b/test/shed_functional/base/twilltestcase.py
index 67d936106e33..b797007e376c 100644
--- a/test/shed_functional/base/twilltestcase.py
+++ b/test/shed_functional/base/twilltestcase.py
@@ -141,7 +141,7 @@ def create(self, cntrller='user', email='test@bx.psu.edu', password='testuser',
except Exception:
try:
# May have created the account in a previous test run...
- self.check_page_for_string("User with that email already exists")
+ self.check_page_for_string("User with email '%s' already exists." % email)
previously_created = True
except Exception:
try:
diff --git a/test/shed_functional/functional/test_0000_basic_repository_features.py b/test/shed_functional/functional/test_0000_basic_repository_features.py
index d5587a2cad7d..fd49bb34e531 100644
--- a/test/shed_functional/functional/test_0000_basic_repository_features.py
+++ b/test/shed_functional/functional/test_0000_basic_repository_features.py
@@ -248,7 +248,7 @@ def test_0090_verify_repository_metadata(self):
def test_0095_verify_reserved_repository_name_handling(self):
'''Check that reserved repository names are handled correctly.'''
category = self.test_db_util.get_category_by_name('Test 0000 Basic Repository Features 1')
- error_message = 'The term repos is a reserved word in the tool shed, so it cannot be used as a repository name.'
+ error_message = "The term 'repos' is a reserved word in the Tool Shed, so it cannot be used as a repository name."
self.get_or_create_repository(name='repos',
description=repository_description,
long_description=repository_long_description,
@@ -261,7 +261,7 @@ def test_0100_verify_reserved_username_handling(self):
self.login(email='baduser@bx.psu.edu', username='repos')
test_user_1 = self.test_db_util.get_user('baduser@bx.psu.edu')
assert test_user_1 is None, 'Creating user with public name "repos" succeeded.'
- error_message = 'The term repos is a reserved word in the tool shed, so it cannot be used as a public user name.'
+ error_message = "The term 'repos' is a reserved word in the Tool Shed, so it cannot be used as a public user name."
self.check_for_strings(strings_displayed=[error_message])
def test_0105_contact_repository_owner(self):
diff --git a/test/shed_functional/test_data/filtering/readme.txt b/test/shed_functional/test_data/filtering/readme.txt
index 9ad35df358ce..291b190d703b 100644
--- a/test/shed_functional/test_data/filtering/readme.txt
+++ b/test/shed_functional/test_data/filtering/readme.txt
@@ -1,2 +1,2 @@
-These characters should not result in a unicode decoding error: ������
+These characters should not result in a unicode decoding error: ������
....
\ No newline at end of file
diff --git a/test/unit/config/config_manage/embedded/config/galaxy.yml b/test/unit/config/config_manage/embedded/config/galaxy.yml
new file mode 100644
index 000000000000..97896d126d36
--- /dev/null
+++ b/test/unit/config/config_manage/embedded/config/galaxy.yml
@@ -0,0 +1,47 @@
+uwsgi:
+ http: 127.0.0.1:8080
+ buffer-size: 16384
+ processes: 1
+ threads: 4
+ offload-threads: 2
+ static-map: /static/style=static/style/blue
+ static-map: /static=static
+ static-map: /favicon.ico=static/favicon.ico
+ master: false
+ virtualenv: .venv
+ pythonpath: lib
+ module: galaxy.webapps.galaxy.buildapp:uwsgi_app()
+ manage-script-name: false
+ thunder-lock: false
+ die-on-term: true
+ hook-master-start: unix_signal:2 gracefully_kill_them_all
+ hook-master-start: unix_signal:15 gracefully_kill_them_all
+ py-call-osafterfork: true
+ enable-threads: true
+
+galaxy:
+ master_api_key: changethis
+ config_dir: 'false'
+ job_config:
+ runners:
+ local:
+ load: galaxy.jobs.runners.local:LocalJobRunner
+ workers: 1
+ dynamic:
+ rules_module: integration.delay_rules
+ execution:
+ default: local_dest
+ environments:
+ local_dest:
+ runner: dynamic
+ type: python
+ function: delay
+ upload_dest:
+ runner: local
+ tools:
+ - id: upload1
+ destination: upload_dest
+ dependency_resolvers:
+ - type: galaxy_packages
+ - type: galaxy_packages
+ versionless: true
diff --git a/test/unit/config/config_manage/simple/config/galaxy.yml b/test/unit/config/config_manage/simple/config/galaxy.yml
new file mode 100644
index 000000000000..cbb51747c6de
--- /dev/null
+++ b/test/unit/config/config_manage/simple/config/galaxy.yml
@@ -0,0 +1,24 @@
+uwsgi:
+ http: 127.0.0.1:8080
+ buffer-size: 16384
+ processes: 1
+ threads: 4
+ offload-threads: 2
+ static-map: /static/style=static/style/blue
+ static-map: /static=static
+ static-map: /favicon.ico=static/favicon.ico
+ master: false
+ virtualenv: .venv
+ pythonpath: lib
+ module: galaxy.webapps.galaxy.buildapp:uwsgi_app()
+ manage-script-name: false
+ thunder-lock: false
+ die-on-term: true
+ hook-master-start: unix_signal:2 gracefully_kill_them_all
+ hook-master-start: unix_signal:15 gracefully_kill_them_all
+ py-call-osafterfork: true
+ enable-threads: true
+
+galaxy:
+ master_api_key: changethis
+ config_dir: 'false'
diff --git a/test/unit/config/config_manage/test_config_manage.py b/test/unit/config/config_manage/test_config_manage.py
index 51165e40650b..7e7f1483b342 100644
--- a/test/unit/config/config_manage/test_config_manage.py
+++ b/test/unit/config/config_manage/test_config_manage.py
@@ -81,6 +81,16 @@ def test_build_uwsgi_yaml():
config_dir.manage_cli(["build_uwsgi_yaml"])
+def test_validate_simple_config():
+ with _config_directory("simple") as config_dir:
+ config_dir.manage_cli(["validate", "galaxy"])
+
+
+def test_validate_embedded_config():
+ with _config_directory("embedded") as config_dir:
+ config_dir.manage_cli(["validate", "galaxy"])
+
+
class _TestConfigDirectory(object):
def __init__(self, base_name):
diff --git a/test/unit/jobs/test_command_factory.py b/test/unit/jobs/test_command_factory.py
index baaf8b1a7735..4d633ec7e676 100644
--- a/test/unit/jobs/test_command_factory.py
+++ b/test/unit/jobs/test_command_factory.py
@@ -46,7 +46,7 @@ def test_shell_commands_external(self):
dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"]
self.job_wrapper.dependency_shell_commands = dep_commands
self.__assert_command_is(_surround_command(
- "%s %s/tool_script.sh > ../tool_stdout 2> ../tool_stderr; return_code=$?" % (
+ "%s %s/tool_script.sh > ../outputs/tool_stdout 2> ../outputs/tool_stderr; return_code=$?" % (
self.job_wrapper.shell,
self.job_wrapper.working_directory,
)))
@@ -157,7 +157,7 @@ def __command(self, **extra_kwds):
def _surround_command(command):
- return '''rm -rf working; mkdir -p working; cd working; %s; sh -c "exit $return_code"''' % command
+ return '''rm -rf working outputs; mkdir -p working outputs; cd working; %s; sh -c "exit $return_code"''' % command
class MockJobWrapper(object):
diff --git a/test/unit/tools/test_evaluation.py b/test/unit/tools/test_evaluation.py
index 37243fbfdbfc..db94baf4ccaf 100644
--- a/test/unit/tools/test_evaluation.py
+++ b/test/unit/tools/test_evaluation.py
@@ -126,7 +126,7 @@ def test_configfiles_evaluation(self):
config_filename = extra_filenames[0]
config_basename = os.path.basename(config_filename)
# Verify config file written into working directory.
- self.assertEqual(os.path.join(self.test_directory, config_basename), config_filename)
+ self.assertEqual(os.path.join(self.test_directory, "configs", config_basename), config_filename)
# Verify config file contents are evaluated against parameters.
assert open(config_filename, "r").read() == "4"
self.assertEqual(command_line, "prog1 %s" % config_filename)
@@ -160,12 +160,7 @@ def get_options(trans, other_values):
"index_path": parameter
})
self.tool._command_line = "prog1 $index_path.fields.path"
-
- def test_path_rewriter(v):
- if v:
- v = v.replace("/old", "/new")
- return v
- self._set_compute_environment(path_rewriter=test_path_rewriter)
+ self._set_compute_environment(unstructured_path_rewrites={"/old": "/new"})
command_line, extra_filenames, _ = self.evaluator.build()
self.assertEqual(command_line, "prog1 /new/path/human")
@@ -226,17 +221,25 @@ def __init__(
working_directory,
input_paths=['/galaxy/files/dataset_1.dat'],
output_paths=['/galaxy/files/dataset_2.dat'],
- path_rewriter=None
+ unstructured_path_rewrites=None
):
self._new_file_path = new_file_path
self._working_directory = working_directory
self._input_paths = input_paths
self._output_paths = output_paths
- self._path_rewriter = path_rewriter
+ self._unstructured_path_rewrites = unstructured_path_rewrites or {}
def input_paths(self):
return self._input_paths
+ def input_path_rewrite(self, dataset):
+ path = self._input_paths[0]
+ return path.false_path if hasattr(path, "false_path") else path
+
+ def output_path_rewrite(self, dataset):
+ path = self._output_paths[0]
+ return path.false_path if hasattr(path, "false_path") else path
+
def output_paths(self):
return self._output_paths
@@ -252,11 +255,11 @@ def tmp_directory(self):
def new_file_path(self):
return self._new_file_path
- def unstructured_path_rewriter(self):
- if self._path_rewriter:
- return self._path_rewriter
- else:
- return super(TestComputeEnvironment, self).unstructured_path_rewriter()
+ def unstructured_path_rewrite(self, path):
+ for key, val in self._unstructured_path_rewrites.items():
+ if path.startswith(key):
+ return path.replace(key, val)
+ return None
def tool_directory(self):
return TEST_TOOL_DIRECTORY
@@ -283,6 +286,10 @@ def test_thresh_param(self):
def params_from_strings(self, params, app, ignore_errors=False):
return params_from_strings(self.inputs, params, app, ignore_errors)
+ @property
+ def config_file(self):
+ return ""
+
@property
def template_macro_params(self):
return {}
@@ -300,10 +307,6 @@ def outputs(self):
output1=ToolOutput("output1"),
)
- @property
- def config_file(self):
- return self._config_files[0]
-
@property
def tmp_directory_vars(self):
return ["TMP"]
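
The mock compute environment above replaces the old callable path_rewriter with a mapping of path prefixes; a tiny standalone sketch of that lookup (the rewrite table is illustrative):

    def unstructured_path_rewrite(path, rewrites):
        # Return the rewritten path for the first matching prefix, or None if
        # no rewrite applies (mirroring the mock environment above).
        for prefix, replacement in rewrites.items():
            if path.startswith(prefix):
                return path.replace(prefix, replacement)
        return None

    print(unstructured_path_rewrite("/old/path/human", {"/old": "/new"}))  # /new/path/human
    print(unstructured_path_rewrite("/other/path", {"/old": "/new"}))      # None
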
diff --git a/test/unit/tools/test_toolbox.py b/test/unit/tools/test_toolbox.py
index ba1c89080121..f254eb5498e1 100644
--- a/test/unit/tools/test_toolbox.py
+++ b/test/unit/tools/test_toolbox.py
@@ -238,7 +238,7 @@ def check_no_tool_errors():
def _try_until_no_errors(self, f):
e = None
- for i in range(30):
+ for i in range(40):
try:
f()
return
diff --git a/test/unit/tools/test_wrappers.py b/test/unit/tools/test_wrappers.py
index 57248754e062..e0f8f789311e 100644
--- a/test/unit/tools/test_wrappers.py
+++ b/test/unit/tools/test_wrappers.py
@@ -69,7 +69,8 @@ def test_select_wrapper_multiple(tool):
@with_mock_tool
def test_select_wrapper_with_path_rewritting(tool):
parameter = _setup_blast_tool(tool, multiple=True)
- wrapper = SelectToolParameterWrapper(parameter, ["val1", "val2"], other_values={}, path_rewriter=lambda v: "Rewrite<%s>" % v)
+ compute_environment = MockComputeEnvironment(None)
+ wrapper = SelectToolParameterWrapper(parameter, ["val1", "val2"], other_values={}, compute_environment=compute_environment)
assert wrapper.fields.path == "Rewrite,Rewrite"
@@ -108,11 +109,27 @@ def test_dataset_wrapper():
def test_dataset_wrapper_false_path():
dataset = MockDataset()
new_path = "/new/path/dataset_123.dat"
- wrapper = DatasetFilenameWrapper(dataset, dataset_path=Bunch(false_path=new_path))
+ wrapper = DatasetFilenameWrapper(dataset, compute_environment=MockComputeEnvironment(false_path=new_path))
assert str(wrapper) == new_path
assert wrapper.file_name == new_path
+class MockComputeEnvironment(object):
+
+ def __init__(self, false_path, false_extra_files_path=None):
+ self.false_path = false_path
+ self.false_extra_files_path = false_extra_files_path
+
+ def input_path_rewrite(self, dataset):
+ return self.false_path
+
+ def input_extra_files_rewrite(self, dataset):
+ return self.false_extra_files_path
+
+ def unstructured_path_rewrite(self, path):
+ return "Rewrite<%s>" % path
+
+
def test_dataset_false_extra_files_path():
dataset = MockDataset()
@@ -121,13 +138,12 @@ def test_dataset_false_extra_files_path():
new_path = "/new/path/dataset_123.dat"
dataset_path = DatasetPath(123, MOCK_DATASET_PATH, false_path=new_path)
- wrapper = DatasetFilenameWrapper(dataset, dataset_path=dataset_path)
+ wrapper = DatasetFilenameWrapper(dataset, compute_environment=MockComputeEnvironment(dataset_path))
# Setting false_path is not enough to override
assert wrapper.extra_files_path == MOCK_DATASET_EXTRA_FILES_PATH
new_files_path = "/new/path/dataset_123_files"
- dataset_path = DatasetPath(123, MOCK_DATASET_PATH, false_path=new_path, false_extra_files_path=new_files_path)
- wrapper = DatasetFilenameWrapper(dataset, dataset_path=dataset_path)
+ wrapper = DatasetFilenameWrapper(dataset, compute_environment=MockComputeEnvironment(false_path=new_path, false_extra_files_path=new_files_path))
assert wrapper.extra_files_path == new_files_path