Commit b33c4b5e authored by Arthur Le Bars

Added interpro and blast workflows and methods

parent 13e0e68a
1 merge request: !1 Release 1.0
@@ -181,7 +181,6 @@ class DeploySpeciesStack(speciesData.SpeciesData):
         :return:
         """
-        # Proceed to the traefik and authelia directories
         os.chdir(self.main_dir)

         # Create directory tree
@@ -229,6 +228,18 @@ class DeploySpeciesStack(speciesData.SpeciesData):
         os.chdir(self.main_dir)

+    def make_orthology_compose_files(self):
+        """
+        :return:
+        """
+
+        os.chdir(self.main_dir)
+        self.make_dirs(["./orthology", "./orthology/src_data", "./orthology/src_data/genomes",
+                        "./orthology/src_data/gff", "./orthology/src_data/newicks",
+                        "./orthology/src_data/proteomes"])
+
     def create_mounts(self, working_dir):
         """
         Create the folders (volumes) required by a container (to see required volumes, check their compose file)
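make_dirs is called above but not shown in this diff; a minimal sketch of what such a helper could look like, assuming it simply wraps os.makedirs and tolerates directories that already exist:

import os

def make_dirs(self, dir_paths):
    # Hypothetical reconstruction of the helper used by make_orthology_compose_files
    for path in dir_paths:
        os.makedirs(path, exist_ok=True)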
@@ -337,12 +337,20 @@ class RunWorkflow(speciesData.SpeciesData):
             elif "gff" in e["name"]:
                 self.datasets["gff_file"] = e["ldda_id"]
                 logging.debug("\t" + e["name"] + ": " + e["ldda_id"])
+            elif e["name"].endswith(".xml") and e["name"].startswith("Interpro"):
+                self.datasets["interproscan_file"] = e["ldda_id"]
+                logging.debug("\t" + e["name"] + ": " + e["ldda_id"])
+            elif e["name"].endswith(".xml") and "diamond" in e["name"]:
+                self.datasets["blast_diamond_file"] = e["ldda_id"]
+                logging.debug("\t" + e["name"] + ": " + e["ldda_id"])

         logging.info("Uploading datasets into history %s" % self.history_id)
         self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["genome_file"])
         self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["gff_file"])
         self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["transcripts_file"])
         self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["proteins_file"])
+        self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["interproscan_file"])
+        self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["blast_diamond_file"])

         _datasets = self.instance.datasets.get_datasets()
         with open(os.path.join(self.main_dir, "datasets_ids.json"), "w") as datasets_ids_outfile:
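The six upload calls above repeat a single pattern; a minimal sketch of an equivalent loop, assuming every entry of self.datasets is a library dataset id bound for the same history:

# Import every collected library dataset into the current history in one pass
for dataset_key, ldda_id in self.datasets.items():
    logging.debug("Importing %s (%s) into history %s" % (dataset_key, ldda_id, self.history_id))
    self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=ldda_id)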
@@ -370,13 +378,14 @@ class RunWorkflow(speciesData.SpeciesData):
         history_datasets_li = self.instance.datasets.get_datasets()
         genome_dataset_hda_id, gff_dataset_hda_id, transcripts_dataset_hda_id, proteins_datasets_hda_id = None, None, None, None
+        interproscan_dataset_hda_id, blast_diamond_dataset_hda_id = None, None

         genome_dataset_hda_id = history_datasets_li[3]["id"]
         gff_dataset_hda_id = history_datasets_li[2]["id"]
         transcripts_dataset_hda_id = history_datasets_li[1]["id"]
         proteins_datasets_hda_id = history_datasets_li[0]["id"]

-        for dataset_dict in history_datasets_li[0:3]:
+        for dataset_dict in history_datasets_li[0:5]:
             # Datasets imports should be the last jobs in history if the function calls are in correct order
             # If not, add the function call "get_datasets_hda_ids()" just after "import_datasets_into_history()"
             if dataset_dict["name"].endswith("proteins.fa"):
@@ -388,13 +397,20 @@ class RunWorkflow(speciesData.SpeciesData):
             elif dataset_dict["name"].endswith(".gff"):
                 gff_dataset_hda_id = dataset_dict["id"]
                 logging.debug("gff dataset hda ID: %s" % gff_dataset_hda_id)
+            elif dataset_dict["name"].endswith(".xml") and dataset_dict["name"].startswith("Interpro"):
+                interproscan_dataset_hda_id = dataset_dict["id"]
+                logging.debug("InterproScan dataset hda ID: %s" % interproscan_dataset_hda_id)
+            elif dataset_dict["name"].endswith(".xml") and "diamond" in dataset_dict["name"]:
+                blast_diamond_dataset_hda_id = dataset_dict["id"]
+                logging.debug("Blast Diamond dataset hda ID: %s" % blast_diamond_dataset_hda_id)
             else:
                 genome_dataset_hda_id = dataset_dict["id"]
                 logging.debug("Genome dataset hda id: %s" % genome_dataset_hda_id)

         # Return a dict made of the hda ids
-        return {"genome_hda_id": genome_dataset_hda_id, "transcripts_hda_id": transcripts_dataset_hda_id,
-                "proteins_hda_id": proteins_datasets_hda_id, "gff_hda_id": gff_dataset_hda_id}
+        return {"genome_hda_id": genome_dataset_hda_id, "transcripts_hda_id": transcripts_dataset_hda_id,
+                "proteins_hda_id": proteins_datasets_hda_id, "gff_hda_id": gff_dataset_hda_id,
+                "interproscan_hda_id": interproscan_dataset_hda_id, "blast_diamond_hda_id": blast_diamond_dataset_hda_id}
     def get_organism_and_analyses_ids(self):
         """
@@ -442,7 +458,7 @@ class RunWorkflow(speciesData.SpeciesData):
             ogs_analysis_output = json.loads(ogs_analysis_json_output)[0]
             self.ogs_analysis_id = str(ogs_analysis_output["analysis_id"])
         except IndexError:
-            logging.debug("no matching OGS analysis exists in the instance's chado database")
+            logging.debug("No matching OGS analysis exists in the instance's chado database")

         # Get the ID for the genome analysis in chado
         genome_analysis = self.instance.tools.run_tool(
@@ -457,7 +473,32 @@ class RunWorkflow(speciesData.SpeciesData):
         except IndexError:
             logging.debug("No matching genome analysis exists in the instance's chado database")

-        return {"org_id": self.org_id, "genome_analysis_id": self.genome_analysis_id, "ogs_analysis_id": self.ogs_analysis_id}
+        # Get the ID for the InterproScan analysis in chado
+        interpro_analysis = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/2.3.3",
+            history_id=self.history_id,
+            tool_inputs={"name": "InterproScan on OGS%s" % self.ogs_version})
+        interpro_analysis_job_out = interpro_analysis["outputs"][0]["id"]
+        interpro_analysis_json_output = self.instance.datasets.download_dataset(dataset_id=interpro_analysis_job_out)
+        try:
+            interpro_analysis_output = json.loads(interpro_analysis_json_output)[0]
+            self.interpro_analysis_id = str(interpro_analysis_output["analysis_id"])
+        except IndexError:
+            logging.debug("No matching InterproScan analysis exists in the instance's chado database")
+
+        # Get the ID for the Diamond analysis in chado
+        blast_diamond_analysis = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/2.3.3",
+            history_id=self.history_id,
+            tool_inputs={"name": "Diamond on OGS%s" % self.ogs_version})
+        blast_diamond_analysis_job_out = blast_diamond_analysis["outputs"][0]["id"]
+        blast_diamond_analysis_json_output = self.instance.datasets.download_dataset(dataset_id=blast_diamond_analysis_job_out)
+        try:
+            blast_diamond_analysis_output = json.loads(blast_diamond_analysis_json_output)[0]
+            self.blast_diamond_analysis_id = str(blast_diamond_analysis_output["analysis_id"])
+        except IndexError:
+            logging.debug("No matching Diamond analysis exists in the instance's chado database")
+
+        return {"org_id": self.org_id, "genome_analysis_id": self.genome_analysis_id, "ogs_analysis_id": self.ogs_analysis_id,
+                "interpro_analysis_id": self.interpro_analysis_id, "blast_diamond_analysis_id": self.blast_diamond_analysis_id}
 if __name__ == "__main__":
@@ -572,7 +613,7 @@ if __name__ == "__main__":
         # Import datasets into history and retrieve their hda IDs
         run_workflow_for_current_organism.import_datasets_into_history()
-        run_workflow_for_current_organism.get_datasets_hda_ids()
+        hda_ids = run_workflow_for_current_organism.get_datasets_hda_ids()

         # Explicit workflow parameter names
         GENOME_FASTA_FILE = "0"
@@ -656,8 +697,6 @@ if __name__ == "__main__":
         run_workflow_for_current_organism.datamap[GENOME_FASTA_FILE] = {"src": "hda", "id": hda_ids["genome_hda_id"]}
         run_workflow_for_current_organism.datamap[GFF_FILE] = {"src": "hda", "id": hda_ids["gff_hda_id"]}

-        # run_workflow_for_current_organism.replace_placeholders_workflow(workflow_str=json.dumps(workflow))
-
         # Run the jbrowse creation workflow
         run_workflow_for_current_organism.run_workflow(workflow_path=workflow,
                                                        workflow_parameters=workflow_parameters,
@@ -672,13 +711,84 @@ if __name__ == "__main__":
         run_workflow_for_current_organism.set_get_history()
         # run_workflow_for_current_organism.get_species_history_id()
+        # Prepare the instance+history for the current organism (add organism and analyses in Chado)
+        # (although it should pose no problem as the "Chado add" refuses to duplicate an analysis/organism anyway)
+        if args.setup:
+            run_workflow_for_current_organism.prepare_history()

         # Get the attributes of the instance and project data files
         run_workflow_for_current_organism.get_instance_attributes()
         run_workflow_for_current_organism.get_organism_and_analyses_ids()

         # Import datasets into history and retrieve their hda IDs
         run_workflow_for_current_organism.import_datasets_into_history()
-        run_workflow_for_current_organism.get_datasets_hda_ids()
+        hda_ids = run_workflow_for_current_organism.get_datasets_hda_ids()

+        INTERPRO_FILE = "0"
+        LOAD_INTERPRO_IN_CHADO = "1"
+        SYNC_INTERPRO_ANALYSIS_INTO_TRIPAL = "2"
+        SYNC_FEATURES_INTO_TRIPAL = "3"
+        POPULATE_MAT_VIEWS = "4"
+        INDEX_TRIPAL_DATA = "5"
+
+        workflow_parameters = {}
+        workflow_parameters[INTERPRO_FILE] = {}
+        workflow_parameters[LOAD_INTERPRO_IN_CHADO] = {"organism": run_workflow_for_current_organism.org_id,
+                                                       "analysis_id": run_workflow_for_current_organism.interpro_analysis_id}
+        workflow_parameters[SYNC_INTERPRO_ANALYSIS_INTO_TRIPAL] = {"analysis_id": run_workflow_for_current_organism.interpro_analysis_id}
+
+        run_workflow_for_current_organism.datamap = {}
+        run_workflow_for_current_organism.datamap[INTERPRO_FILE] = {"src": "hda", "id": hda_ids["interproscan_hda_id"]}
+
+        # Run Interproscan workflow
+        run_workflow_for_current_organism.run_workflow(workflow_path=workflow,
+                                                       workflow_parameters=workflow_parameters,
+                                                       datamap=run_workflow_for_current_organism.datamap,
+                                                       workflow_name="Interproscan")
elif "Blast" in str(workflow):
logging.info("Executing workflow 'Blast_Diamond")
run_workflow_for_current_organism.connect_to_instance()
run_workflow_for_current_organism.set_get_history()
# run_workflow_for_current_organism.get_species_history_id()
# Prepare the instance+history for the current organism (add organism and analyses in Chado)
# (although it should pose no problem as the "Chado add" refuses to duplicate an analysis/organism anyway)
if args.setup:
run_workflow_for_current_organism.prepare_history()
# Get the attributes of the instance and project data files
run_workflow_for_current_organism.get_instance_attributes()
run_workflow_for_current_organism.get_organism_and_analyses_ids()
# Import datasets into history and retrieve their hda IDs
run_workflow_for_current_organism.import_datasets_into_history()
hda_ids = run_workflow_for_current_organism.get_datasets_hda_ids()
BLAST_FILE = "0"
LOAD_BLAST_IN_CHADO = "1"
SYNC_BLAST_ANALYSIS_INTO_TRIPAL = "2"
SYNC_FEATURES_INTO_TRIPAL = "3"
POPULATE_MAT_VIEWS = "4"
INDEX_TRIPAL_DATA = "5"
workflow_parameters = {}
workflow_parameters[INTERPRO_FILE] = {}
workflow_parameters[LOAD_BLAST_IN_CHADO] = {"organism": run_workflow_for_current_organism.org_id,
"analysis_id": run_workflow_for_current_organism.blast_diamond_analysis_id}
workflow_parameters[SYNC_BLAST_ANALYSIS_INTO_TRIPAL] = {"analysis_id": run_workflow_for_current_organism.blast_diamond_analysis_id}
run_workflow_for_current_organism.datamap = {}
run_workflow_for_current_organism.datamap[INTERPRO_FILE] = {"src": "hda", "id": hda_ids["interproscan_hda_id"]}
# Run Interproscan workflow
run_workflow_for_current_organism.run_workflow(workflow_path=workflow,
workflow_parameters=workflow_parameters,
datamap=run_workflow_for_current_organism.datamap,
workflow_name="Interproscan")
else: else:
......
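The Interproscan and Blast_Diamond branches above differ only in the dataset key, the analysis id and the workflow name; a minimal sketch of one parameterized driver (the function and argument names are hypothetical):

def run_annotation_workflow(runner, workflow, hda_key, analysis_id, workflow_name, setup=False):
    # Shared driver for the two branches above; step ids "0"-"2" follow the workflow files below
    runner.connect_to_instance()
    runner.set_get_history()
    if setup:
        runner.prepare_history()
    runner.get_instance_attributes()
    runner.get_organism_and_analyses_ids()
    runner.import_datasets_into_history()
    hda_ids = runner.get_datasets_hda_ids()

    INPUT_FILE, LOAD_IN_CHADO, SYNC_ANALYSIS = "0", "1", "2"
    workflow_parameters = {INPUT_FILE: {},
                           LOAD_IN_CHADO: {"organism": runner.org_id, "analysis_id": analysis_id},
                           SYNC_ANALYSIS: {"analysis_id": analysis_id}}
    runner.datamap = {INPUT_FILE: {"src": "hda", "id": hda_ids[hda_key]}}
    runner.run_workflow(workflow_path=workflow, workflow_parameters=workflow_parameters,
                        datamap=runner.datamap, workflow_name=workflow_name)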
# ./docker_data is created and filled with persistent data that should be backed up
version: '3.7'
services:
tripal:
image: quay.io/galaxy-genome-annotation/tripal:v2.x
depends_on:
- tripal-db
volumes:
- ./docker_data/galaxy/:/export/:ro
- ./src_data/:/data/:ro
environment:
DB_HOST: tripal-db.orthology
BASE_URL_PATH: /sp/orthology
UPLOAD_LIMIT: 20M
MEMORY_LIMIT: 512M
TRIPAL_GIT_CLONE_MODULES: "https://github.com/abretaud/tripal_rest_api.git[@c6f9021ea5d4c6d7c67c5bd363a7dd9359228bbc] https://github.com/abretaud/tripal_linkout.git[@91e08a11b788f005631cc0fafa4d68b1d2ce3ceb] https://github.com/abretaud/tripal_phylotree.git[@4cad769c9deaa5cd2933e1ae29be6aea8c8c0cc2]"
TRIPAL_DOWNLOAD_MODULES: ""
TRIPAL_ENABLE_MODULES: "tripal_rest_api tripal_phylotree tripal_linkout"
SITE_NAME: "Orthology"
ENABLE_JBROWSE: 0
ENABLE_APOLLO: 0
ENABLE_BLAST: 0
ENABLE_DOWNLOAD: 0
ENABLE_WIKI: 0
ENABLE_GO: 0
ENABLE_ORTHOLOGY: 1
# Must match the SITE_NAME in the gcv part
GCV_SITE_NAME: "example"
# Use this to point to the external address of your main tripal host
MAIN_TRIPAL_URL: "http://{{ hostname }}"
#THEME: "{{ tripal_theme_name }}" # Use this to use another theme
#THEME_GIT_CLONE: "{{ tripal_theme_git_clone }}" # Use this to install another theme
ADMIN_PASSWORD: {{ tripal_password }} # You need to define it and update it in galaxy config below
networks:
- traefikbig
- orthology
deploy:
labels:
- "traefik.http.routers.orthology-tripal.rule=(Host(`{{ hostname }}`) && PathPrefix(`/sp/orthology`))"
- "traefik.http.routers.orthology-tripal.tls=true"
- "traefik.http.routers.orthology-tripal.entryPoints=webs"
- "traefik.http.routers.orthology-tripal.middlewares=sp-auth,sp-trailslash,sp-prefix,tripal-addprefix"
- "traefik.http.services.orthology-tripal.loadbalancer.server.port=80"
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
tripal-db:
image: quay.io/galaxy-genome-annotation/chado:1.31-jenkins21-pg9.5
environment:
- POSTGRES_PASSWORD=postgres
# The default chado image would try to install the schema on first run;
# we just want the tools to be available.
- INSTALL_CHADO_SCHEMA=0
- INSTALL_YEAST_DATA=0
volumes:
- ./docker_data/tripal_db/:/var/lib/postgresql/data/
networks:
- orthology
gcv:
image: quay.io/abretaud/lis-gcv
depends_on:
- tripal-db
environment:
DB_HOST: tripal-db.orthology
HOST: {{ hostname }}
SITE_NAME: example
SITE_FULL_NAME: example
DEBUG: "false"
GCV_URL: http://{{ hostname }}/sp/orthology/gcv
SERVICES_URL: http://{{ hostname }}/sp/orthology/gcv_api/services
TRIPAL_URL: http://{{ hostname }}/sp/orthology/
USE_GENE_UNAME: "true"
networks:
- traefikbig
- orthology
deploy:
labels:
- "traefik.http.routers.orthology-gcv.rule=(Host(`{{ hostname }}`) && PathPrefix(`/sp/orthology/gcv`))"
- "traefik.http.routers.orthology-gcv.tls=true"
- "traefik.http.routers.orthology-gcv.entryPoints=webs"
- "traefik.http.routers.orthology-gcv.service=orthology-gcv"
- "traefik.http.routers.orthology-gcv.middlewares=sp-auth,sp-app-trailslash,sp-app-prefix"
- "traefik.http.services.orthology-gcv.loadbalancer.server.port=80"
- "traefik.http.routers.orthology-gcv-api.rule=(Host(`{{ hostname }}`) && PathPrefix(`/sp/orthology/gcv_api`))"
- "traefik.http.routers.orthology-gcv-api.tls=true"
- "traefik.http.routers.orthology-gcv-api.entryPoints=webs"
- "traefik.http.routers.orthology-gcv-api.service=orthology-gcv-api"
- "traefik.http.routers.orthology-gcv-api.middlewares=sp-auth,sp-app-trailslash,sp-app-prefix"
- "traefik.http.services.orthology-gcv-api.loadbalancer.server.port=8000"
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
galaxy:
image: quay.io/galaxy-genome-annotation/docker-galaxy-annotation:gmod
volumes:
- ../galaxy_data_libs_SI.py:/opt/setup_data_libraries.py
- ./docker_data/galaxy/:/export/
- ./src_data/:/project_data/:ro
#- /groups/XXX/:/groups/XXX/:ro # We do this when we have symlinks in src_data pointing to /groups/XXX/...
environment:
NONUSE: nodejs,proftp,reports,condor
GALAXY_LOGGING: full
GALAXY_CONFIG_BRAND: "Orthology"
GALAXY_CONFIG_ALLOW_LIBRARY_PATH_PASTE: "True"
GALAXY_CONFIG_USE_REMOTE_USER: "True"
GALAXY_CONFIG_REMOTE_USER_MAILDOMAIN: "bipaa"
GALAXY_CONFIG_ADMIN_USERS: "admin@galaxy.org,gogepp@bipaa" # admin@galaxy.org is the default (leave it), gogepp@bipaa is a shared ldap user we use to connect
ENABLE_FIX_PERMS: 0
PROXY_PREFIX: /sp/orthology/galaxy
GALAXY_TRIPAL_URL: http://tripal.orthology/tripal/
GALAXY_TRIPAL_PASSWORD: {{ tripal_password }} # See tripal config above
GALAXY_CHADO_DBHOST: tripal-db.orthology
GALAXY_CHADO_DBSCHEMA: chado
GALAXY_AUTO_UPDATE_DB: 1
GALAXY_AUTO_UPDATE_CONDA: 1
GALAXY_AUTO_UPDATE_TOOLS: "/galaxy-central/tools_1.yaml"
GALAXY_SHARED_DIR: ""
BLAT_ENABLED: 1
networks:
- traefikbig
- orthology
deploy:
labels:
- "traefik.http.routers.orthology-galaxy.rule=(Host(`{{ hostname }}`) && PathPrefix(`/sp/orthology/galaxy`))"
- "traefik.http.routers.orthology-galaxy.tls=true"
- "traefik.http.routers.orthology-galaxy.entryPoints=webs"
- "traefik.http.routers.orthology-galaxy.middlewares=sp-auth,sp-app-trailslash,sp-app-prefix"
- "traefik.http.services.orthology-galaxy.loadbalancer.server.port=80"
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
networks:
traefikbig:
external: true
orthology:
driver: overlay
name: orthology
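This compose file is a jinja2 template (note the {{ hostname }} and {{ tripal_password }} placeholders); a minimal sketch of rendering it before a docker stack deploy, where the file names and variable values are illustrative assumptions:

from jinja2 import Template

# Render the orthology compose template; paths and values are assumptions for illustration
with open("orthology/docker-compose.yml.j2") as template_file:
    template = Template(template_file.read())

rendered = template.render(hostname="example.org", tripal_password="change-me")

with open("orthology/docker-compose.yml", "w") as compose_file:
    compose_file.write(rendered)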
{
"a_galaxy_workflow": "true",
"annotation": "",
"format-version": "0.1",
"name": "Blast_Diamond",
"steps": {
"0": {
"annotation": "",
"content_id": null,
"errors": null,
"id": 0,
"input_connections": {},
"inputs": [],
"label": null,
"name": "Input dataset",
"outputs": [],
"position": {
"left": 200,
"top": 200
},
"tool_id": null,
"tool_state": "{\"optional\": false}",
"tool_version": null,
"type": "data_input",
"uuid": "89e7487e-004d-4db1-b5eb-1676b98aebde",
"workflow_outputs": [
{
"label": null,
"output_name": "output",
"uuid": "c1602850-3c2c-44b9-87fa-af3509c6e751"
}
]
},
"1": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_load_blast/load_blast/2.3.4+galaxy0",
"errors": null,
"id": 1,
"input_connections": {
"input": {
"id": 0,
"output_name": "output"
},
"wait_for": {
"id": 0,
"output_name": "output"
}
},
"inputs": [
{
"description": "runtime parameter for tool Chado load Blast results",
"name": "analysis_id"
},
{
"description": "runtime parameter for tool Chado load Blast results",
"name": "input"
},
{
"description": "runtime parameter for tool Chado load Blast results",
"name": "organism_id"
},
{
"description": "runtime parameter for tool Chado load Blast results",
"name": "wait_for"
}
],
"label": null,
"name": "Chado load Blast results",
"outputs": [
{
"name": "results",
"type": "json"
}
],
"position": {
"left": 487.66668701171875,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_load_blast/load_blast/2.3.4+galaxy0",
"tool_shed_repository": {
"changeset_revision": "08ae8b27b193",
"name": "chado_load_blast",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"blastdb_id\": \"59\", \"input\": {\"__class__\": \"RuntimeValue\"}, \"match_on_name\": \"false\", \"organism_id\": {\"__class__\": \"RuntimeValue\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"query_type\": \"polypeptide\", \"re_name\": \"\", \"skip_missing\": \"false\", \"wait_for\": {\"__class__\": \"RuntimeValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "2.3.4+galaxy0",
"type": "tool",
"uuid": "a9d3b3a9-b377-44cd-bc10-87b37edb52b5",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "e686e177-04ac-41e9-ab42-a40cd6d5a0a7"
}
]
},
"2": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
"errors": null,
"id": 2,
"input_connections": {
"wait_for": {
"id": 1,
"output_name": "results"
}
},
"inputs": [
{
"description": "runtime parameter for tool Synchronize an analysis",
"name": "analysis_id"
},
{
"description": "runtime parameter for tool Synchronize an analysis",
"name": "wait_for"
}
],
"label": null,
"name": "Synchronize an analysis",
"outputs": [
{
"name": "results",
"type": "json"
}
],
"position": {
"left": 775.3333740234375,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
"tool_shed_repository": {
"changeset_revision": "f487ff676088",
"name": "tripal_analysis_sync",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"wait_for\": {\"__class__\": \"RuntimeValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "3.2.1.0",
"type": "tool",
"uuid": "8ed9fc57-f900-456f-9c77-b75a20d90b76",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "7c31aa69-2993-4d1f-a6ef-6a014cf68964"
}
]
},
"3": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
"errors": null,
"id": 3,
"input_connections": {
"wait_for": {
"id": 2,
"output_name": "results"
}
},
"inputs": [],
"label": null,
"name": "Populate materialized views",
"outputs": [
{
"name": "results",
"type": "txt"
}
],
"position": {
"left": 1063,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
"tool_shed_repository": {
"changeset_revision": "3c08f32a3dc1",
"name": "tripal_db_populate_mviews",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"mview\": \"\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "3.2.1.0",
"type": "tool",
"uuid": "7da6acf6-87a1-4c26-a750-3bf410d2642d",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "b747c370-0b27-4a8a-b1dd-71c0f9635cc6"
}
]
},
"4": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
"errors": null,
"id": 4,
"input_connections": {
"wait_for": {
"id": 3,
"output_name": "results"
}
},
"inputs": [],
"label": null,
"name": "Index Tripal data",
"outputs": [
{
"name": "results",
"type": "txt"
}
],
"position": {
"left": 1350.6666259765625,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
"tool_shed_repository": {
"changeset_revision": "d55a39f12dda",
"name": "tripal_db_index",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"expose\": {\"do_expose\": \"no\", \"__current_case__\": 0}, \"queues\": \"10\", \"table\": {\"mode\": \"website\", \"__current_case__\": 0}, \"tokenizer\": \"standard\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "3.2.1.1",
"type": "tool",
"uuid": "5808d837-a507-4fe5-8a90-ad1934645b56",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "2db45aa2-e09e-493b-b095-47dac7dff6fc"
}
]
}
},
"tags": [],
"uuid": "3119f6f5-5500-4d9f-b939-123c54d31968",
"version": 1
}
\ No newline at end of file
{
"a_galaxy_workflow": "true",
"annotation": "",
"format-version": "0.1",
"name": "Interproscan",
"steps": {
"0": {
"annotation": "",
"content_id": null,
"errors": null,
"id": 0,
"input_connections": {},
"inputs": [],
"label": null,
"name": "Input dataset",
"outputs": [],
"position": {
"left": 200,
"top": 200
},
"tool_id": null,
"tool_state": "{\"optional\": false}",
"tool_version": null,
"type": "data_input",
"uuid": "89e7487e-004d-4db1-b5eb-1676b98aebde",
"workflow_outputs": [
{
"label": null,
"output_name": "output",
"uuid": "032bfe93-96b6-4628-a7e2-209f92a94b4f"
}
]
},
"1": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_load_interpro/load_interpro/2.3.4+galaxy0",
"errors": null,
"id": 1,
"input_connections": {
"input": {
"id": 0,
"output_name": "output"
},
"wait_for": {
"id": 0,
"output_name": "output"
}
},
"inputs": [
{
"description": "runtime parameter for tool Chado load InterProScan results",
"name": "analysis_id"
},
{
"description": "runtime parameter for tool Chado load InterProScan results",
"name": "organism_id"
}
],
"label": null,
"name": "Chado load InterProScan results",
"outputs": [
{
"name": "results",
"type": "json"
}
],
"position": {
"left": 487.66668701171875,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_load_interpro/load_interpro/2.3.4+galaxy0",
"tool_shed_repository": {
"changeset_revision": "3284a0c7570e",
"name": "chado_load_interpro",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"input\": {\"__class__\": \"ConnectedValue\"}, \"match_on_name\": \"false\", \"organism_id\": {\"__class__\": \"RuntimeValue\"}, \"parse_go\": \"false\", \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"query_type\": \"polypeptide\", \"re_name\": \"\", \"skip_missing\": \"false\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "2.3.4+galaxy0",
"type": "tool",
"uuid": "81eab133-ed2e-4302-9c3c-75c035a12840",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "e53f0344-9fde-4489-a951-636507c6bef8"
}
]
},
"2": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
"errors": null,
"id": 2,
"input_connections": {
"wait_for": {
"id": 1,
"output_name": "results"
}
},
"inputs": [
{
"description": "runtime parameter for tool Synchronize an analysis",
"name": "analysis_id"
},
{
"description": "runtime parameter for tool Synchronize an analysis",
"name": "wait_for"
}
],
"label": null,
"name": "Synchronize an analysis",
"outputs": [
{
"name": "results",
"type": "json"
}
],
"position": {
"left": 775.3333740234375,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
"tool_shed_repository": {
"changeset_revision": "f487ff676088",
"name": "tripal_analysis_sync",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"wait_for\": {\"__class__\": \"RuntimeValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "3.2.1.0",
"type": "tool",
"uuid": "8ed9fc57-f900-456f-9c77-b75a20d90b76",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "7c31aa69-2993-4d1f-a6ef-6a014cf68964"
}
]
},
"3": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
"errors": null,
"id": 3,
"input_connections": {
"wait_for": {
"id": 2,
"output_name": "results"
}
},
"inputs": [],
"label": null,
"name": "Populate materialized views",
"outputs": [
{
"name": "results",
"type": "txt"
}
],
"position": {
"left": 1063,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
"tool_shed_repository": {
"changeset_revision": "3c08f32a3dc1",
"name": "tripal_db_populate_mviews",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"mview\": \"\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "3.2.1.0",
"type": "tool",
"uuid": "7da6acf6-87a1-4c26-a750-3bf410d2642d",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "b747c370-0b27-4a8a-b1dd-71c0f9635cc6"
}
]
},
"4": {
"annotation": "",
"content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
"errors": null,
"id": 4,
"input_connections": {
"wait_for": {
"id": 3,
"output_name": "results"
}
},
"inputs": [],
"label": null,
"name": "Index Tripal data",
"outputs": [
{
"name": "results",
"type": "txt"
}
],
"position": {
"left": 1350.6666259765625,
"top": 200
},
"post_job_actions": {},
"tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
"tool_shed_repository": {
"changeset_revision": "d55a39f12dda",
"name": "tripal_db_index",
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"expose\": {\"do_expose\": \"no\", \"__current_case__\": 0}, \"queues\": \"10\", \"table\": {\"mode\": \"website\", \"__current_case__\": 0}, \"tokenizer\": \"standard\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "3.2.1.1",
"type": "tool",
"uuid": "5808d837-a507-4fe5-8a90-ad1934645b56",
"workflow_outputs": [
{
"label": null,
"output_name": "results",
"uuid": "2db45aa2-e09e-493b-b095-47dac7dff6fc"
}
]
}
},
"tags": [],
"uuid": "3446613c-b251-4a11-98fe-4965948773c3",
"version": 4
}
\ No newline at end of file
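The two JSON documents above ("Blast_Diamond" and "Interproscan") are standard Galaxy workflow exports; a minimal bioblend sketch of importing and invoking one of them, with the URL, API key, file path and ids as placeholders:

from bioblend.galaxy import GalaxyInstance

# Placeholder URL and API key
gi = GalaxyInstance(url="http://localhost:8080", key="YOUR_API_KEY")

# Import the exported workflow JSON (placeholder path), then invoke it on a
# dataset that is already in a history (placeholder hda and history ids)
workflow = gi.workflows.import_workflow_from_local_path("Interproscan.ga")
inputs = {"0": {"src": "hda", "id": "INTERPROSCAN_XML_HDA_ID"}}
gi.workflows.invoke_workflow(workflow["id"], inputs=inputs, history_id="HISTORY_ID")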