Commit 10ddfb28 authored by Arthur Le Bars

Fixed Jbrowse workflow (removed runtime str parameter altogether) plus some formatting and code cleaning
parent 465597c3
1 merge request: !1 Release 1.0
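For context, the core of this commit: instead of passing runtime ("RuntimeValue") parameters to the JBrowse tools, placeholder tokens are baked into the .ga files' tool_state and substituted in the loaded JSON before the workflow is imported. A minimal sketch of that approach, mirroring the diff below (the function name and call site are illustrative, not part of the commit):

    import json

    def load_and_patch_jbrowse_workflow(ga_path, menu_url, full_name, unique_id):
        # Load the .ga workflow as a plain dict so the tool_state strings can be edited
        with open(ga_path, "r") as ga_file:
            workflow_dict = json.load(ga_file)
        # Substitute the placeholders instead of using runtime parameters, which
        # make the JBrowse tool fail with a "replace not found" internal error
        steps = workflow_dict["steps"]
        steps["2"]["tool_state"] = steps["2"]["tool_state"].replace("__MENU_URL__", menu_url)
        steps["3"]["tool_state"] = (steps["3"]["tool_state"]
                                    .replace("__FULL_NAME__", full_name)
                                    .replace("__UNIQUE_ID__", unique_id))
        return workflow_dict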
@@ -33,7 +33,6 @@ class DeploySpeciesStack(speciesData.SpeciesData):
the organism's directory tree to create the required docker-compose files
"""
def make_directory_tree(self):
"""
Generate the directory tree for an organism and move datasets into src_data
@@ -55,7 +54,7 @@ class DeploySpeciesStack(speciesData.SpeciesData):
sys.exit()
# Copy the custom banner to the species dir (banner used in tripal pages)
# if not "custom_banner" not in self.config.keys() or not self.config["custom_banner"] == "/path/to/banner" or not self.config["custom_banner"] == "":
# if "custom_banner" not in self.config.keys() or not self.config["custom_banner"] == "/path/to/banner" or not self.config["custom_banner"] == "":
# try:
# logging.debug("Custom banner path: %s" % self.config["custom_banner"])
# if os.path.isfile(os.path.abspath(self.config["custom_banner"])):
@@ -92,9 +91,9 @@ class DeploySpeciesStack(speciesData.SpeciesData):
os.mkdir("./src_data/tracks")
except FileExistsError:
logging.debug("Depth 1 src_data folder(s) already exist for %s" % self.full_name)
except PermissionError:
except PermissionError as exc:
logging.critical("Insufficient permission to create src_data directory tree")
sys.exit()
sys.exit(exc)
# Depth 2
try:
@@ -102,9 +101,9 @@ class DeploySpeciesStack(speciesData.SpeciesData):
os.mkdir("./src_data/genome/" + self.species_folder_name)
except FileExistsError:
logging.debug("Depth 2 src_data folder(s) already exist for %s" % self.full_name)
except PermissionError:
except PermissionError as exc:
logging.critical("Insufficient permission to create src_data directory tree")
sys.exit()
sys.exit(exc)
# Depth 3
try:
@@ -114,9 +113,9 @@ class DeploySpeciesStack(speciesData.SpeciesData):
organism_genome_dir = os.path.abspath("./src_data/genome/" + self.species_folder_name + "/v" + self.genome_version)
except FileExistsError:
logging.debug("Depth 3 src_data folder(s) already exist for %s" % self.full_name)
except PermissionError:
except PermissionError as exc:
logging.critical("Insufficient permission to create src_data directory tree")
sys.exit()
sys.exit(exc)
# Return to main_dir
os.chdir(self.main_dir)
@@ -138,21 +137,24 @@ class DeploySpeciesStack(speciesData.SpeciesData):
logging.critical("Cannot access " + self.species_dir)
sys.exit(0)
# Path to the templates used to generate the custom docker-compose files for an input species
gspecies_template_path = self.script_dir + "/templates/gspecies_compose_template.yml.j2"
# Path to the templates used to generate the custom docker-compose files for an input species and the traefik+authelia services
gspecies_template_path = self.script_dir + "/templates/gspecies_compose_template.yml.j2" # Jinja template path
traefik_template_path = self.script_dir + "/templates/traefik_compose_template.yml"
# authelia_config_path = self.script_dir + "/templates/authelia_config_example.yml" # Do not copy the authelia config!
authelia_users_path = self.script_dir + "/templates/authelia_users_template.yml"
# Set the genus_species_strain_sex var, used
genus_species_strain_sex = "{0}_{1}".format(self.genus.lower(), self.species)
# Set the genus_species_strain_sex value used to fill in the template
genus_species_strain_sex = "{0}_{1}".format(self.genus.lower(), self.species.lower())
if self.sex and self.strain:
genus_species_strain_sex = "_".join([self.genus.lower(), self.species, self.strain, self.sex])
genus_species_strain_sex = "_".join([self.genus.lower(), self.species.lower(), self.strain, self.sex])
elif self.sex and not self.strain:
genus_species_strain_sex = "_".join([self.genus.lower(), self.species, self.sex])
genus_species_strain_sex = "_".join([self.genus.lower(), self.specieslower(), self.sex])
elif self.genus and not self.species and not self.sex and not self.strain:
genus_species_strain_sex = self.genus.lower()
else:
genus_species_strain_sex = "{0}_{1}".format(self.genus.lower(), self.species)
genus_species_strain_sex = "{0}_{1}".format(self.genus.lower(), self.specieslower())
# # Method without Jinja templating (deprecated)
# with open(stack_template_path, 'r') as infile:
# organism_content = list()
# for line in infile:
@@ -180,7 +182,8 @@ class DeploySpeciesStack(speciesData.SpeciesData):
# self.create_mounts(working_dir=self.species_dir)
# jinja templating, handled using the python jinja module (TODO: use ansible to handle the templating in production)
# jinja templating, handled using the python jinja module
# TODO: use ansible to handle the templating in production
file_loader = FileSystemLoader(self.script_dir + "/templates")
env = Environment(loader=file_loader)
template = env.get_template("gspecies_compose_template.yml.j2")
@@ -195,7 +198,8 @@ class DeploySpeciesStack(speciesData.SpeciesData):
render_vars = {**self.config, **input_vars}
output = template.render(render_vars)
print(output)
with open(os.path.join(self.species_dir, "docker-compose.yml"), "w") as gspecies_compose_file:
print("Writing gspecies compose yml file")
try:
os.chdir(os.path.abspath(self.main_dir))
@@ -229,7 +233,6 @@ class DeploySpeciesStack(speciesData.SpeciesData):
os.chdir(self.main_dir)
def create_mounts(self, working_dir):
"""
Create the folders (volumes) required by a container (to see required volumes, check their compose file)
@@ -403,7 +406,7 @@ if __name__ == "__main__":
logging.info("Stack deployed for %s" % deploy_stack_for_current_organism.full_name)
# TODO: IF GENUS°1 == GENUS°2 AND SP°1 == SP°2 --> SKIP INIT, CONTINUE TO NEXT ITEM IN INPUT (DEPLOY AT THE END)
# TODO: if GENUS°1 == GENUS°2 AND SP°1 == SP°2 --> skip init, continue to next item and only deploy once the loop is done
# TODO: RELOAD TRAEFIK OUTSIDE LOOP
# TODO: reload traefik outside loop
logging.info("All stacks deployed for organisms in input file %s" % args.input)
@@ -9,12 +9,13 @@ import logging
import sys
import json
import time
import utilities
import speciesData
from bioblend.galaxy.objects import GalaxyInstance
from bioblend import galaxy
import bioblend  # needed for the bioblend.ConnectionError handler below
import utilities
import speciesData
"""
gga_init.py
@@ -29,29 +30,25 @@ class RunWorkflow(speciesData.SpeciesData):
This script is made to work for a Phaeoexplorer-specific workflow, but can be adapted to run any workflow,
provided the user creates their own workflow in a .ga format, and change the set_parameters function
to have the correct parameters for their workflow (TODO: use a mapping file for parameters and the .ga file)
to have the correct parameters for their workflow
"""
def set_get_history(self):
"""
Create or set the working history to the current species one
TODO move to utilities
:return:
"""
try:
histories = self.instance.histories.get_histories(name=str(self.full_name))
self.history_id = histories[0]["id"]
logging.info("History for {0}: {1}".format(self.full_name, self.history_id))
except IndexError:
logging.info("Creating history for %s" % self.full_name)
self.instance.histories.create_history(name=str(self.full_name))
histories = self.instance.histories.get_histories(name=str(self.full_name))
self.history_id = histories[0]["id"]
logging.info("History for {0}: {1}".format(self.full_name, self.history_id))
return self.history_id
@@ -65,8 +62,7 @@ class RunWorkflow(speciesData.SpeciesData):
:return:
"""
histories = self.instance.histories.get_histories(name=str(self.full_name))
self.history_id = histories[0]["id"]
self.set_get_history()
logging.debug("History ID: " + self.history_id)
libraries = self.instance.libraries.get_libraries() # normally only one library
@@ -110,7 +106,6 @@ class RunWorkflow(speciesData.SpeciesData):
return {"history_id": self.history_id, "library_id": library_id, "datasets": self.datasets}
def connect_to_instance(self):
"""
Test the connection to the galaxy instance for the current organism
@@ -132,7 +127,6 @@ class RunWorkflow(speciesData.SpeciesData):
logging.info("Successfully connected to galaxy instance %s" % self.instance_url)
return 1
def prepare_history(self):
"""
Galaxy instance startup in preparation for importing datasets and running a workflow
@@ -145,7 +139,7 @@ class RunWorkflow(speciesData.SpeciesData):
"""
self.connect_to_instance()
histories = self.instance.histories.get_histories(name=str(self.full_name))
self.set_get_history()
# Add organism (species) to chado
@@ -194,7 +188,6 @@ class RunWorkflow(speciesData.SpeciesData):
logging.info("Finished initializing instance")
def run_workflow(self, workflow_path, workflow_parameters, workflow_name, datamap):
"""
Run a workflow in galaxy
@@ -207,22 +200,34 @@ class RunWorkflow(speciesData.SpeciesData):
"""
logging.info("Importing workflow %s" % str(workflow_path))
workflow_ga_file = workflow_path
with open(workflow_ga_file, 'r') as ga_in_file:
# Store the decoded json dictionary
# Load the workflow file (.ga) in a buffer
with open(workflow_path, 'r') as ga_in_file:
# Then store the decoded json dictionary
workflow_dict = json.load(ga_in_file)
# In the case of the Jbrowse workflow, we unfortunately have to edit the parameters manually instead of setting them
# as runtime values, because using runtime parameters makes the tool throw an internal critical error ("replace not found" error)
if workflow_name == "Jbrowse":
workflow_dict["steps"]["2"]["tool_state"] = workflow_dict["steps"]["2"]["tool_state"].replace("__MENU_URL__", "http://{hostname}:{port}/sp/{genus_sp}/feature/{Genus}/{species}/{id}".format(hostname=self.config["hostname"], port=self.config["http_port"], genus_sp=self.genus_species, Genus=self.genus_uppercase, species=self.species, id="{id}"))
workflow_dict["steps"]["3"]["tool_state"] = workflow_dict["steps"]["3"]["tool_state"].replace("__FULL_NAME__", self.full_name).replace("__UNIQUE_ID__", self.abbreviation)
# Import the workflow in galaxy as a dict
self.instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
# Get its attributes
workflow_attributes = self.instance.workflows.get_workflows(name=workflow_name)
workflow_id = workflow_attributes[0]["id"] # Most recently imported workflow
# Then get its ID (required to invoke the workflow)
workflow_id = workflow_attributes[0]["id"] # Index 0 is the most recently imported workflow (the one we want)
show_workflow = self.instance.workflows.show_workflow(workflow_id=workflow_id)
# Check if the workflow is found
try:
logging.debug("Workflow ID: %s" % workflow_id)
# logging.debug("Workflow information:\n%s" % str(show_workflow))
except Exception:
except bioblend.ConnectionError:
logging.warning("Error retrieving workflow attributes for workflow %s" % workflow_name)
# Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
self.instance.workflows.invoke_workflow(workflow_id=workflow_id,
history_id=self.history_id,
params=workflow_parameters,
@@ -230,29 +235,28 @@ class RunWorkflow(speciesData.SpeciesData):
inputs_by="",
allow_tool_state_corrections=True)
logging.info("Successfully imported and invoked workflow {0}, check your galaxy instance ({1}) for the jobs state".format(workflow_name, self.instance_url))
logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, self.instance_url))
def get_invocation_report(self, workflow_name):
"""
Debugging method for workflows
Get report of a workflow invocation (execution of a workflow in the instance via the API
Simply logs and returns a report of the previous workflow invocation (execution of a workflow in
the instance via the API)
:param workflow_name:
:return:
"""
workflow_attributes = self.instance.workflows.get_workflows(name=workflow_name)
workflow_id = workflow_attributes[1]["id"] # Most recently imported workflow
print("Workflow ID: %s" % workflow_id)
workflow_id = workflow_attributes[1]["id"] # Most recently imported workflow (index 1 in the list)
invocations = self.instance.workflows.get_invocations(workflow_id=workflow_id)
print(invocations)
invocation_id = invocations[0]["id"]
invocation_id = invocations[1]["id"] # Most recent invocation
invocation_report = self.instance.invocations.get_invocation_report(invocation_id=invocation_id)
logging.debug(invocation_report)
return invocation_report
def import_datasets_into_history(self):
"""
@@ -293,23 +297,23 @@ class RunWorkflow(speciesData.SpeciesData):
if type(e) == dict:
if e["name"].endswith(".fa"):
self.datasets["genome_file"] = e["ldda_id"]
logging.info("\t" + e["name"] + ": " + e["ldda_id"])
logging.debug("\t" + e["name"] + ": " + e["ldda_id"])
if k == "/annotation":
sub_folder_content = self.instance.folders.show_folder(folder_id=v, contents=True)
final_sub_folder_content = self.instance.folders.show_folder(folder_id=sub_folder_content["folder_contents"][0]["id"], contents=True)
for k2, v2 in final_sub_folder_content.items():
for e in v2:
if type(e) == dict:
# TODO: manage versions? (differentiate between the correct folders using self.config)
# TODO: manage genome and ogs versions (differentiate between the correct folders using self.config)
if "transcripts" in e["name"]:
self.datasets["transcripts_file"] = e["ldda_id"]
logging.info("\t" + e["name"] + ": " + e["ldda_id"])
logging.debug("\t" + e["name"] + ": " + e["ldda_id"])
elif "proteins" in e["name"]:
self.datasets["proteins_file"] = e["ldda_id"]
logging.info("\t" + e["name"] + ": " + e["ldda_id"])
logging.debug("\t" + e["name"] + ": " + e["ldda_id"])
elif "gff" in e["name"]:
self.datasets["gff_file"] = e["ldda_id"]
logging.info("\t" + e["name"] + ": " + e["ldda_id"])
logging.debug("\t" + e["name"] + ": " + e["ldda_id"])
logging.info("Uploading datasets into history %s" % self.history_id)
self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["genome_file"])
@@ -323,18 +327,15 @@ class RunWorkflow(speciesData.SpeciesData):
return {"history_id": self.history_id, "library_id": library_id, "datasets": self.datasets}
def get_datasets_hda_ids(self):
"""
Get the hda IDs of the datasets imported into a history
The most "recent" imports will be prioritized
As some tools will not work using the input datasets ldda IDs
To bypass this issue, we need to retrieve the datasets imported into an history using this method
As some tools will not work using the input datasets' ldda IDs, we need to retrieve the dataset IDs imported
into a history
TODO: call every tool of workflows with hda datasets ids (clearer and more robust)
:return:
"""
@@ -372,8 +373,6 @@ class RunWorkflow(speciesData.SpeciesData):
return{"genome_hda_id": genome_dataset_hda_id, "transcripts_hda_id": transcripts_dataset_hda_id,
"proteins_hda_id": proteins_datasets_hda_id, "gff_hda_id": gff_dataset_hda_id}
def get_organism_and_analyses_ids(self):
"""
Retrieve current organism ID and OGS and genome chado analyses IDs (needed to run some tools as Tripal/Chado
@@ -435,6 +434,7 @@ class RunWorkflow(speciesData.SpeciesData):
except IndexError:
logging.debug("no matching genome analysis exists in the instance's chado database")
return {"org_id": self.org_id, "genome_analysis_id": self.genome_analysis_id, "ogs_analysis_id": self.ogs_analysis_id}
if __name__ == "__main__":
@@ -463,6 +463,11 @@ if __name__ == "__main__":
type=str,
help="Worfklow to run")
parser.add_argument("--setup",
help="Initialize the species history by adding the organism and associated analyses to the "
"chado database",
action="store_true")
args = parser.parse_args()
if args.verbose:
@@ -524,8 +529,6 @@ if __name__ == "__main__":
run_workflow_for_current_organism.genus_lowercase,
run_workflow_for_current_organism.species)
# TODO: Create distinct methods to call different pre-set workflows using CL arguments/config options (i.e load-chado, jbrowse, functional-annotation, orthology, ...)
# If input workflow is Chado_load_Tripal_synchronize.ga
if "Chado_load_Tripal_synchronize" in str(workflow):
@@ -535,16 +538,16 @@ if __name__ == "__main__":
run_workflow_for_current_organism.set_get_history()
# run_workflow_for_current_organism.get_species_history_id()
# Prepare the instance+history for the current organism (add organism and analyses in Chado) TODO: add argument "setup"
# Prepare the instance+history for the current organism (add organism and analyses in Chado)
# (although it should pose no problem as the "Chado add" refuses to duplicate an analysis/organism anyway)
# run_workflow_for_current_organism.prepare_history()
if args.setup:
run_workflow_for_current_organism.prepare_history()
# Get the attributes of the instance and project data files
run_workflow_for_current_organism.get_instance_attributes()
run_workflow_for_current_organism.get_organism_and_analyses_ids()
# Import datasets into history
# TODO: it seems it is not required anymore since using "ldda" option for datasets in the workflow datamap doesn't need files from history
# Import datasets into history and retrieve their hda IDs
run_workflow_for_current_organism.import_datasets_into_history()
run_workflow_for_current_organism.get_datasets_hda_ids()
@@ -599,7 +602,6 @@ if __name__ == "__main__":
datamap=run_workflow_for_current_organism.datamap,
workflow_name="Chado load Tripal synchronize")
# Jbrowse creation workflow
elif "Jbrowse" in str(workflow):
@@ -609,8 +611,9 @@ if __name__ == "__main__":
run_workflow_for_current_organism.set_get_history()
run_workflow_for_current_organism.get_instance_attributes()
run_workflow_for_current_organism.get_organism_and_analyses_ids()
# Import datasets into history and get their hda IDs
run_workflow_for_current_organism.import_datasets_into_history()
hda_ids = run_workflow_for_current_organism.get_datasets_hda_ids() # Note: always call this function AFTER calling "import_datasets_into_history()"
hda_ids = run_workflow_for_current_organism.get_datasets_hda_ids() # Note: only call this function AFTER calling "import_datasets_into_history()"
# Debugging
# run_workflow_for_current_organism.get_invocation_report(workflow_name="Jbrowse")
@@ -621,25 +624,16 @@ if __name__ == "__main__":
ADD_ORGANISM_TO_JBROWSE = "3"
workflow_parameters = {}
workflow_parameters[GENOME_FASTA_FILE] = {}
workflow_parameters[GFF_FILE] = {}
# Jbrowse custom feature url
workflow_parameters[ADD_JBROWSE] = {"jb_menu": {"menu_url": "http://{hostname}:{port}/sp/{genus_sp}/feature/{Genus}/{species}/{id}".format(hostname=run_workflow_for_current_organism.config["hostname"],
port=run_workflow_for_current_organism.config["http_port"],
genus_sp=run_workflow_for_current_organism.genus_species,
Genus=run_workflow_for_current_organism.genus_uppercase,
species=run_workflow_for_current_organism.species,
id="id")}}
# Organism to add to the Jbrowse "container" (consists of a name and an id, not tied to the galaxy instance or chado/tripal names and ids)
workflow_parameters[ADD_ORGANISM_TO_JBROWSE] = {"name": [{"name": run_workflow_for_current_organism.full_name,
"unique_id": run_workflow_for_current_organism.abbreviation}]}
workflow_parameters[ADD_JBROWSE] = {}
workflow_parameters[ADD_ORGANISM_TO_JBROWSE] = {}
run_workflow_for_current_organism.datamap = {}
run_workflow_for_current_organism.datamap[GENOME_FASTA_FILE] = {"src": "hda", "id": hda_ids["genome_hda_id"]}
run_workflow_for_current_organism.datamap[GFF_FILE] = {"src": "hda", "id": hda_ids["gff_hda_id"]}
# run_workflow_for_current_organism.replace_placeholders_workflow(workflow_str=json.dumps(workflow))
# Run the jbrowse creation workflow
run_workflow_for_current_organism.run_workflow(workflow_path=workflow,
@@ -647,9 +641,6 @@ if __name__ == "__main__":
datamap=run_workflow_for_current_organism.datamap,
workflow_name="Jbrowse")
else:
logging.critical("The galaxy container for %s is not ready yet!" % run_workflow_for_current_organism.full_name)
sys.exit()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from _datetime import datetime
import os
import sys
import utilities
from _datetime import datetime
class SpeciesData:
"""
This class contains attributes and functions to interact with the galaxy container of the GGA environment
@@ -40,7 +42,7 @@ class SpeciesData:
self.full_name_lowercase = self.full_name.lower()
self.abbreviation = "_".join(utilities.filter_empty_not_empty_items([self.genus_lowercase[0], self.species, self.strain, self.sex])["not_empty"])
self.genus_species = self.genus_lowercase + "_" + self.species
self.instance_url = "http://scratchgmodv1:8888/sp/" + self.genus_lowercase + "_" + self.species + "/galaxy/" # Testing with scratchgmodv1, is overwritten in the script by the provided hostname
self.instance_url = None
self.instance = None
self.history_id = None
self.library = None
@@ -61,15 +63,10 @@ class SpeciesData:
# API key used to communicate with the galaxy instance. Cannot be used to do user-tied actions
self.config = None
# Custom config used to set environment variables inside containers, defaults to the one in the repo
if parameters_dictionary["data"]["parent_directory"] == "" or parameters_dictionary["data"][
"parent_directory"] == "/path/to/closest/parent/dir":
self.source_data_dir = "/shared/projects/phaeoexplorer/" # Testing path for phaeoexplorer data
if parameters_dictionary["data"]["parent_directory"] == "" or parameters_dictionary["data"]["parent_directory"] == "/path/to/closest/parent/dir":
self.source_data_dir = "/shared/projects/phaeoexplorer/" # Testing path for phaeoexplorer data
else:
self.source_data_dir = parameters_dictionary["data"]["parent_directory"]
self.species_folder_name = "_".join(utilities.filter_empty_not_empty_items([self.genus_lowercase,
self.species,
self.strain,
self.sex])["not_empty"])
self.species_folder_name = "_".join(utilities.filter_empty_not_empty_items([self.genus_lowercase, self.species, self.strain, self.sex])["not_empty"])
self.existing_folders_cache = {}
self.bam_metadata_cache = {}
# # Directory/subdirectories where data files are located (fasta, gff, ...)
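The species_folder_name and abbreviation fields above rely on utilities.filter_empty_not_empty_items, which is not shown in this diff. A plausible sketch consistent with how it is used here (the real helper in utilities.py may differ):

    def filter_empty_not_empty_items(items):
        # Partition a list into empty and non-empty entries, preserving order,
        # so callers can join only the non-empty name components
        filtered = {"empty": [], "not_empty": []}
        for item in items:
            if item is None or item == "":
                filtered["empty"].append(item)
            else:
                filtered["not_empty"].append(item)
        return filtered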
@@ -27,7 +27,7 @@
{
"label": null,
"output_name": "output",
"uuid": "6fddadc5-3420-4747-920f-7816c926f16b"
"uuid": "c4a08da4-366c-4b48-b9e0-b382a5b3e6bc"
}
]
},
@@ -54,7 +54,7 @@
{
"label": null,
"output_name": "output",
"uuid": "d29ec40e-71fb-4960-94d5-af4666ad1c1d"
"uuid": "a4dce244-cea0-4458-8f49-876762de565b"
}
]
},
@@ -99,7 +99,7 @@
"owner": "iuc",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"test\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"test\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"RuntimeValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"RuntimeValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"false\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"mRNA\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": {\"__class__\": \"RuntimeValue\"}, \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_off\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"test\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"test\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"RuntimeValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"RuntimeValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"false\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"mRNA\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": \"__MENU_URL__\", \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_off\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "1.16.10+galaxy0",
"type": "tool",
"uuid": "ba7d15fd-8ffd-407d-9a45-47cd4be68bd2",
@@ -143,7 +143,7 @@
"owner": "gga",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"organisms\": [{\"__index__\": 0, \"jbrowse\": {\"__class__\": \"ConnectedValue\"}, \"name\": {\"__class__\": \"RuntimeValue\"}, \"advanced\": {\"unique_id\": {\"__class__\": \"RuntimeValue\"}}}], \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_state": "{\"organisms\": [{\"__index__\": 0, \"jbrowse\": {\"__class__\": \"RuntimeValue\"}, \"name\": \"__FULL_NAME__\", \"advanced\": {\"unique_id\": \"__UNIQUE_ID__\"}}], \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "0.5.1",
"type": "tool",
"uuid": "1cf25ca3-2287-4b82-9e93-b8828eed70a2",
@@ -157,6 +157,6 @@
}
},
"tags": [],
"uuid": "7745ddc9-190a-436d-9bd3-2318e9d568a8",
"version": 0
"uuid": "639ceed7-d4f9-456f-b5f3-5cd2cc65ddca",
"version": 1
}
\ No newline at end of file
@@ -27,7 +27,7 @@
{
"label": null,
"output_name": "output",
"uuid": "6955cc2b-d4d6-484b-8a89-6e4c5dcca879"
"uuid": "ac834ebd-236e-4539-86da-916da1ac8c5a"
}
]
},
@@ -54,7 +54,7 @@
{
"label": null,
"output_name": "output",
"uuid": "2b6950b9-6e05-478b-a548-66de8230d217"
"uuid": "a9c468b7-935b-49b3-83d0-6eabafae8daf"
}
]
},
@@ -73,7 +73,12 @@
"output_name": "output"
}
},
"inputs": [],
"inputs": [
{
"description": "runtime parameter for tool JBrowse",
"name": "reference_genome"
}
],
"label": null,
"name": "JBrowse",
"outputs": [
@@ -94,7 +99,7 @@
"owner": "iuc",
"tool_shed": "toolshed.g2.bx.psu.edu"
},
"tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"ConnectedValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"ConnectedValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"false\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": {\"__class__\": \"RuntimeValue\"}, \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_off\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": "1", \"__rerun_remap_job_id__\": "True"}",
"tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"test\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"test\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"RuntimeValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"RuntimeValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"false\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"mRNA\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": \"{{ MENU_URL }}\", \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_off\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
"tool_version": "1.16.10+galaxy0",
"type": "tool",
"uuid": "ba7d15fd-8ffd-407d-9a45-47cd4be68bd2",
@@ -152,6 +157,6 @@
}
},
"tags": [],
"uuid": "82768602-9800-4868-ac2b-a5cbacb79b8a",
"version": 2
}
"uuid": "77f04b69-2dec-430b-891f-f4ddbf04d1db",
"version": 1
}
\ No newline at end of file