diff --git a/constants.py b/constants.py
index 1e3be48b57ecaee5b6e1d4834f6f8c2e6ce2317a..5d34b825a25554733b380df54203ff63b3677de6 100644
--- a/constants.py
+++ b/constants.py
@@ -1,3 +1,6 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
 # Constants used in the input yaml
 ORG_PARAM_NAME = "name"
 ORG_PARAM_DESC = "description"
@@ -21,12 +24,10 @@ ORG_PARAM_DATA_BLASTP_PATH = "blastp_path"
 ORG_PARAM_DATA_BLASTX_PATH = "blastx_path"
 ORG_PARAM_DATA_GENOME_VERSION = "genome_version"
 ORG_PARAM_DATA_OGS_VERSION = "ogs_version"
-ORG_PARAM_DATA_PERFORMED_BY = "performed_by"
 ORG_PARAM_SERVICES = "services"
 ORG_PARAM_SERVICES_BLAST = "blast"
 ORG_PARAM_SERVICES_GO = "go"
 
-
 # Constants used in the config yaml file
 CONF_ALL_HOSTNAME = "hostname"
 CONF_ALL_HTTP_PORT = "http_port"
@@ -39,6 +40,7 @@ CONF_ALL_AUTHELIA_DB_POSTGRES_PASSWORD = "authelia_db_postgres_password"
 CONF_GALAXY_DEFAULT_ADMIN_EMAIL = "galaxy_default_admin_email"
 CONF_GALAXY_DEFAULT_ADMIN_USER = "galaxy_default_admin_user"
 CONF_GALAXY_DEFAULT_ADMIN_PASSWORD = "galaxy_default_admin_password"
+CONF_GALAXY_DEFAULT_ADMIN_KEY = "galaxy_default_admin_key"
 CONF_GALAXY_CONFIG_REMOTE_USER_MAILDOMAIN = "galaxy_config_remote_user_maildomain"
 CONF_GALAXY_PERSIST_DATA = "galaxy_persist_data"
 CONF_TRIPAL_PASSWORD = "tripal_password"
@@ -47,14 +49,27 @@ CONF_TRIPAL_THEME_NAME = "tripal_theme_name"
 CONF_TRIPAL_THEME_GIT_CLONE = "tripal_theme_git_clone"
 CONF_JBROWSE_MENU_URL = "jbrowse_menu_url"
 
+# Data
+FILENAME_SUFFIX_TRANSCRIPTS = "transcripts_gff.fasta"
+FILENAME_SUFFIX_PROTEINS = "proteins.fasta"
+FILENAME_SUFFIX_INTERPRO = "interproscan.xml"
+FILENAME_SUFFIX_BLASTP = "diamond_blastp_vs_uniref90.xml" # Temporary constant: this value should be in the organism input file
+FILENAME_SUFFIX_BLASTX = "diamond_blastx_vs_uniref90.xml" # Temporary constant: this value should be in the organism input file
+FILENAME_SUFFIX_ORTHOFINDER = "orthologous_one2one_vs_Ec32.tsv" # Temporary constant: this value should be in the organism input file
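+# (Assumed naming) these suffixes are appended to the per-organism dataset prefix when building the
+# src_data file names, e.g. a proteins file could end up as "Genus_species_OGS1.0_proteins.fasta"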
+DATA_DATE = "2021-02-24" # Temporary constant: this value should be in the organism input file, for each data
+
 # default config file
 DEFAULT_CONFIG = "examples/config"
 
+# Galaxy tools
 GET_ORGANISMS_TOOL = "toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0"
 DELETE_ORGANISMS_TOOL = "toolshed.g2.bx.psu.edu/repos/gga/chado_organism_delete_organisms/organism_delete_organisms/2.3.4+galaxy0"
 
+# Galaxy library
 HOST_DATA_DIR='src_data'
 CONTAINER_DATA_DIR_ROOT='/project_data'
+GALAXY_LIBRARY_NAME = 'Project Data'
+GALAXY_LIBRARY_DESC = 'Data for current genome annotation project'
 
-REQUIRED_PARAMETERS = [CONF_ALL_HOSTNAME, CONF_ALL_HTTP_PORT, CONF_GALAXY_DEFAULT_ADMIN_EMAIL, CONF_GALAXY_DEFAULT_ADMIN_USER, 
+REQUIRED_PARAMETERS = [CONF_ALL_HOSTNAME, CONF_ALL_HTTP_PORT, CONF_GALAXY_DEFAULT_ADMIN_EMAIL, CONF_GALAXY_DEFAULT_ADMIN_USER,
 					   CONF_GALAXY_DEFAULT_ADMIN_PASSWORD, CONF_TRIPAL_PASSWORD, CONF_GALAXY_CONFIG_REMOTE_USER_MAILDOMAIN]
diff --git a/constants_phaeo.py b/constants_phaeo.py
new file mode 100644
index 0000000000000000000000000000000000000000..ace8d66dc798f42024d51515afc75967ac3005bd
--- /dev/null
+++ b/constants_phaeo.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import constants
+
+### Workflows
+# WARNING: Be very careful about how the workflow is "organized" (i.e. the order of the steps/datasets; check the .ga file if anything looks wrong)
+
+WORKFLOWS_PATH = "workflows_phaeoexplorer/"
+WF_LOAD_GFF_JB = "load_fasta_gff_jbrowse"
+
+WF_LOAD_GFF_JB_1_ORG_FILE = "Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_1org_v6.ga"
+WF_LOAD_GFF_JB_1_ORG_INPUT_GENOME = "0"
+WF_LOAD_GFF_JB_1_ORG_INPUT_GFF = "1"
+WF_LOAD_GFF_JB_1_ORG_INPUT_PROTEINS = "2"
+WF_LOAD_GFF_JB_1_ORG_STEP_LOAD_FASTA = "3"
+WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE = "4"
+WF_LOAD_GFF_JB_1_ORG_STEP_LOAD_GFF = "5"
+WF_LOAD_GFF_JB_1_ORG_STEP_JB_TO_CONTAINER = "6"
+WF_LOAD_GFF_JB_1_ORG_STEP_FEATURE_SYNC = "7"
+WF_LOAD_GFF_JB_1_ORG_STEP_POPULATE_VIEWS = "8"
+WF_LOAD_GFF_JB_1_ORG_STEP_INDEX = "9"
+
+WF_LOAD_GFF_JB_2_ORG_FILE = "Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_2org_v6.ga"
+WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG1 = "0"
+WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG1 = "1"
+WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG1 = "2"
+WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG2 = "3"
+WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG2 = "4"
+WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG2 = "5"
+WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_FASTA_ORG1 = "6"
+WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1 = "7"
+WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2 = "8"
+WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_GFF_ORG1 = "9"
+WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER = "10"
+WF_LOAD_GFF_JB_2_ORG_STEP_FEATURE_SYNC_ORG1 = "11"
+WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_FASTA_ORG2 = "12"
+WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_GFF_ORG2 = "13"
+WF_LOAD_GFF_JB_2_ORG_STEP_FEATURE_SYNC_ORG2 = "14"
+WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS = "15"
+WF_LOAD_GFF_JB_2_ORG_STEP_INDEX = "16"
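+
+# A minimal usage sketch (assumed): these input/step ids are meant to be used as keys in the
+# dictionaries passed to bioblend when invoking the workflow, e.g.
+#   datamap = {WF_LOAD_GFF_JB_1_ORG_INPUT_GENOME: {"src": "hda", "id": genome_hda_id}}
+#   instance.workflows.invoke_workflow(workflow_id, inputs=datamap, history_id=history_id)
+# where genome_hda_id, workflow_id and history_id are hypothetical variables set elsewhere.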
+
+
+### Galaxy tools
+
+ADD_ORGANISM_TOOL_NAME = "toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/"
+ADD_ORGANISM_TOOL_VERSION = "2.3.4+galaxy0"
+ADD_ORGANISM_TOOL_ID = ADD_ORGANISM_TOOL_NAME + ADD_ORGANISM_TOOL_VERSION
+ADD_ORGANISM_TOOL_CHANGESET_REVISION = "1f12b9650028"
+
+ADD_ANALYSIS_TOOL_NAME = "toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/"
+ADD_ANALYSIS_TOOL_VERSION = "2.3.4+galaxy0"
+ADD_ANALYSIS_TOOL_ID = ADD_ANALYSIS_TOOL_NAME + ADD_ANALYSIS_TOOL_VERSION
+ADD_ANALYSIS_TOOL_CHANGESET_REVISION = "10b2b1c70e69"
+ADD_ANALYSIS_TOOL_PARAM_PROGRAM = "Performed by Genoscope"
+ADD_ANALYSIS_TOOL_PARAM_DATE = constants.DATA_DATE
+
+GET_ORGANISMS_TOOL_NAME = "toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/"
+GET_ORGANISMS_TOOL_VERSION = "2.3.4+galaxy0"
+GET_ORGANISMS_TOOL_ID = GET_ORGANISMS_TOOL_NAME + GET_ORGANISMS_TOOL_VERSION
+GET_ORGANISMS_TOOL_CHANGESET_REVISION = "831229e6cda2"
+
+GET_ANALYSES_TOOL_NAME = "toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/"
+GET_ANALYSES_TOOL_VERSION = "2.3.4+galaxy0"
+GET_ANALYSES_TOOL_ID = GET_ANALYSES_TOOL_NAME + GET_ANALYSES_TOOL_VERSION
+GET_ANALYSES_TOOL_CHANGESET_REVISION = "a867923f555e"
+
+ANALYSIS_SYNC_TOOL_NAME = "toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/"
+ANALYSIS_SYNC_TOOL_VERSION = "3.2.1.0"
+ANALYSIS_SYNC_TOOL_ID = ANALYSIS_SYNC_TOOL_NAME + ANALYSIS_SYNC_TOOL_VERSION
+ANALYSIS_SYNC_TOOL_CHANGESET_REVISION = "f487ff676088"
+
+ORGANISM_SYNC_TOOL_NAME = "toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/"
+ORGANISM_SYNC_TOOL_VERSION = "3.2.1.0"
+ORGANISM_SYNC_TOOL_ID = ORGANISM_SYNC_TOOL_NAME + ORGANISM_SYNC_TOOL_VERSION
+ORGANISM_SYNC_TOOL_CHANGESET_REVISION = "afd5d92745fb"
diff --git a/examples/citrus_sinensis.yml b/examples/citrus_sinensis.yml
index af423e09ebe9e927c076f122594e896fc9c414f9..68f2f6e6f9a5bf0a7d53c5db5d47a89d484fb490 100644
--- a/examples/citrus_sinensis.yml
+++ b/examples/citrus_sinensis.yml
@@ -35,7 +35,6 @@
     genome_version: 1.0
     # Same as genome version, but for the OGS analysis
     ogs_version: 1.0
-    performed_by:
   services:
   # List the optional services/resources to add
     blast: 0 # "1" to add links to blast form, "some/url" to specify, multiple urls as "Name1=url1&Name2=url2", "0" to disable it. Default: "0"
diff --git a/examples/config.yml b/examples/config.yml
index 6c5a780b2cd0186e8efca5e5443949cc8188dd66..695ab037edca67cef70d507c1cc9757ca42d058f 100644
--- a/examples/config.yml
+++ b/examples/config.yml
@@ -1,19 +1,20 @@
 # This is the configuration template file used by the gga_init.py, gga_load_data.py and run_workflow.py scripts
 
 # These variables are used by several services at once or the paths to import sensitive files
-hostname: localhost  # Required. The hosting machine name
-http_port: 8888  # Required. The HTTP port docker will use on the hosting machine
-https_port: 8889  # Required for Authelia. The HTTPS port docker will use on the hosting machine
+hostname: fancydb.com  # Required. The hostname used to access the application (http://hostname/sp/genus_species/), possibly behind a reverse proxy that redirects requests to the host machine on the right port (http_port).
+http_port: 80  # Required. The HTTP port docker will use on the hosting machine
+https_port: 443  # Required for Authelia. The HTTPS port docker will use on the hosting machine
 proxy_ip: XXX.XXX.XXX.XXX  # Required. IP of the upstream proxy (used by Traefik)
-authentication_domain_name: XXXXXXXX  #  Required for Authelia. The authentication domain name.
+authentication_domain_name: auth.fancydb.com  #  Required for Authelia. The authentication domain name.
 authelia_config_path: /path/to/authelia_config.yml #  Required for Authelia. Path to the Authelia configuration file
 authelia_secrets_env_path: /path/to/authelia/secrets.env #  Required for Authelia. Path to the env file containing passwords and secrets needed for Authelia
-authelia_db_postgres_password: XXXXXXXX #  Required for Authelia.
+authelia_db_postgres_password: psqlpwd #  Required for Authelia.
 
 # galaxy-specific variables
 galaxy_default_admin_email: gga@galaxy.org  # Required
 galaxy_default_admin_user: gga  # Required
 galaxy_default_admin_password: password  # Required
+galaxy_default_admin_key: myfakekey  # Required
 galaxy_config_remote_user_maildomain: mydomain.com  # Required. The maildomain used by Galaxy authentication
 galaxy_persist_data: "True"  # Optional (default: True). If False, docker data will NOT be persisted on your host's file system and will be lost any time the galaxy container is recreated. Do not set this variable to "False" for production
 
diff --git a/gga_get_data.py b/gga_get_data.py
index 992e5c6c46f59d0bfdd2cecf14f99d3be21f0a95..89788e21ef9eb7999dcd65665800275b80b4dfa4 100755
--- a/gga_get_data.py
+++ b/gga_get_data.py
@@ -5,7 +5,6 @@ import argparse
 import os
 import logging
 import sys
-import time
 import shutil
 
 import utilities
@@ -65,13 +64,6 @@ class GetData(speciesData.SpeciesData):
 
         logging.info("src_data directory tree generated for %s" % self.full_name)
 
-    def get_last_modified_time_string(self, filePath):
-        # give the last modification date for the file, with format '20190130'
-        lastModifiedTimestamp = os.path.getmtime(filePath)
-        lastModifiedTimeStructure = time.localtime(lastModifiedTimestamp)
-        lastModifiedDate = time.strftime("%Y%m%d", lastModifiedTimeStructure)
-        return lastModifiedDate
-
     def get_source_data_files_from_path(self):
         """
         Find source data files and copy them into the src_data dir tree
@@ -101,7 +93,7 @@ class GetData(speciesData.SpeciesData):
         for k, v in genome_datasets.items():
             if v:  # If dataset is not present in input file, skip copy
                 logging.info("Copying {0} ({1}) into {2}".format(k, v, organism_genome_dir))
-                genome_fname = "{0}_v{1}.fasta".format(self.dataset_prefix, self.genome_version)
+                genome_fname = self.genome_filename
                 try:
                     shutil.copyfile(os.path.abspath(v), os.path.join(organism_genome_dir, genome_fname))
                 except Exception as exc:
@@ -111,19 +103,19 @@ class GetData(speciesData.SpeciesData):
             if v:  # If dataset is not present in input file, skip copy
                 dataset_fname = ""
                 if k == constants.ORG_PARAM_DATA_GFF_PATH:
-                    dataset_fname = "{0}_OGS{1}_{2}.gff".format(self.dataset_prefix, self.ogs_version, self.get_last_modified_time_string(os.path.abspath(v)))
+                    dataset_fname = self.gff_filename
                 elif k == constants.ORG_PARAM_DATA_TRANSCRIPTS_PATH:
-                    dataset_fname = "{0}_OGS{1}_transcripts.fasta".format(self.dataset_prefix, self.ogs_version)
+                    dataset_fname = self.transcripts_filename
                 elif k == constants.ORG_PARAM_DATA_PROTEINS_PATH:
-                    dataset_fname = "{0}_OGS{1}_proteins.fasta".format(self.dataset_prefix, self.ogs_version)
+                    dataset_fname = self.proteins_filename
                 elif k == constants.ORG_PARAM_DATA_ORTHOFINDER_PATH:
-                    dataset_fname = "{0}_OGS{1}_orthofinder.tsv".format(self.dataset_prefix, self.ogs_version)
+                    dataset_fname = self.orthofinder_filename
                 elif k == constants.ORG_PARAM_DATA_INTERPRO_PATH:
-                    dataset_fname = "{0}_OGS{1}_interproscan.xml".format(self.dataset_prefix, self.ogs_version)
+                    dataset_fname = self.interpro_filename
                 elif k == constants.ORG_PARAM_DATA_BLASTP_PATH:
-                    dataset_fname = "{0}_OGS{1}_blastp.xml".format(self.dataset_prefix, self.ogs_version)
+                    dataset_fname = self.blastp_filename
                 elif k == constants.ORG_PARAM_DATA_BLASTX_PATH:
-                    dataset_fname = "{0}_OGS{1}_blastx.xml".format(self.dataset_prefix, self.ogs_version)
+                    dataset_fname = self.blastx_filename
                 logging.info("Copying {0} ({1}) into {2}".format(k, v, organism_annotation_dir))
                 try:
                     shutil.copyfile(os.path.abspath(v), os.path.join(organism_annotation_dir, dataset_fname))
diff --git a/gga_init.py b/gga_init.py
index 7416aab594712e352efcd7df75e40bbcf6f2d71e..1708aa5138e71a2dc3976fad517d663e9d5293b8 100755
--- a/gga_init.py
+++ b/gga_init.py
@@ -37,6 +37,9 @@ class DeploySpeciesStack(speciesData.SpeciesData):
     the organism's directory tree to create the required docker-compose files and stack deployment
 
     """
+    def __init__(self, parameters_dictionary):
+        self.picture_path = None
+        super().__init__(parameters_dictionary)
 
     def make_directory_tree(self):
         """
@@ -131,7 +134,7 @@ class DeploySpeciesStack(speciesData.SpeciesData):
                         "genus_species_sex": "{0}_{1}_{2}".format(self.genus_lowercase, self.species_lowercase, self.sex),
                         "strain": self.strain, "sex": self.sex, "Genus_species": "{0} {1}".format(self.genus_uppercase, self.species_lowercase),
                         "blast": self.blast, "go": self.go, "picture_path": self.picture_path}
-        if (len(self.config.keys()) == 0):
+        if len(self.config.keys()) == 0:
             logging.error("Empty config dictionary")
         # Merge the two dicts
         render_vars = {**self.config, **input_vars}
@@ -161,19 +164,6 @@ class DeploySpeciesStack(speciesData.SpeciesData):
         os.chdir(self.main_dir)
 
 
-    def make_orthology_compose_files(self):
-        """
-        Create/update orthology compose files
-
-        :return:
-        """
-
-        os.chdir(self.main_dir)
-
-        make_dirs["./orthology", "./orthology/src_data", "./orthology/src_data/genomes", 
-                       "./orthology/src_data/gff", "./orthology/src_data/newicks", "./orthology/src_data/proteomes"]
-
-
 def make_dirs(dir_paths_li):
     """
     Recursively create directories from a list of paths with a try-catch condition
@@ -386,7 +376,7 @@ if __name__ == "__main__":
     else:
         config_file = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), constants.DEFAULT_CONFIG)
     config = utilities.parse_config(config_file)
-    if (len(config.keys()) == 0):
+    if len(config.keys()) == 0:
         logging.error("Empty config dictionary")
 
     main_dir = None
diff --git a/gga_load_data.py b/gga_load_data.py
index 74634d6d5f9d158a39ff01a30df82d8b45336bc5..726b4e907594cfefccca98f1a5edc846b5aa27e8 100755
--- a/gga_load_data.py
+++ b/gga_load_data.py
@@ -2,7 +2,6 @@
 # -*- coding: utf-8 -*-
 import re
 
-import bioblend
 import argparse
 import os
 import logging
@@ -10,11 +9,10 @@ import sys
 import time
 import json
 import yaml
-import subprocess
-from bioblend import galaxy
 from bioblend.galaxy.objects import GalaxyInstance
 
 import utilities
+import utilities_bioblend
 import speciesData
 import constants
 
@@ -40,55 +38,6 @@ class LoadData(speciesData.SpeciesData):
         self.bam_metadata_cache = {}
         super().__init__(parameters_dictionary)
 
-    def get_history(self):
-        """
-        Create or set the working history to the current species one
-
-        :return:
-        """
-        try:
-            histories = self.instance.histories.get_histories(name=str(self.genus_species))
-            if len(histories) == 1:
-                self.history_id = histories[0]["id"]
-                logging.debug("History ID set for {0} {1}: {2}".format(self.genus, self.species, self.history_id))
-            else:
-                logging.critical("Multiple histories exists for {1}: {2}".format(self.genus, self.species))
-        except IndexError:
-            logging.info("Creating history for {0} {1}".format(self.genus, self.species))
-            hist_dict = self.instance.histories.create_history(name=str(self.genus_species))
-            self.history_id = hist_dict["id"]
-            logging.debug("History ID set for {0} {1}: {2}".format(self.genus, self.species, self.history_id))
-
-        return self.history_id
-
-    def remove_homo_sapiens_from_db(self):
-        """
-        Run the GMOD tool to remove the "Homo sapiens" default organism from the original database
-        Will do nothing if H. sapiens isn't in the database
-
-        """
-
-        logging.debug("Getting 'Homo sapiens' ID in chado database")
-        get_sapiens_id_job_output_dataset_id = utilities.run_tool_and_get_single_output_dataset_id(
-            self.instance,
-            tool_id=constants.GET_ORGANISMS_TOOL, # If this version if not found, Galaxy will use the one that is found
-            history_id=self.history_id,
-            tool_inputs={"genus": "Homo", "species": "sapiens"})
-        get_sapiens_id_json_output = self.instance.datasets.download_dataset(dataset_id=get_sapiens_id_job_output_dataset_id)
-
-        logging.info("Deleting Homo 'sapiens' in the instance's chado database")
-        try:
-            get_sapiens_id_final_output = json.loads(get_sapiens_id_json_output)[0]
-            sapiens_id = str(get_sapiens_id_final_output["organism_id"])  # needs to be str to be recognized by the chado tool
-            utilities.run_tool(
-                self.instance,
-                tool_id=constants.DELETE_ORGANISMS_TOOL,
-                history_id=self.history_id,
-                tool_inputs={"organism": sapiens_id})
-        except IndexError:
-            logging.error("Homo sapiens isn't in the instance's chado database (IndexError)")
-            pass
-
     def purge_histories(self):
         """
         Delete all histories in the instance
@@ -114,10 +63,10 @@ class LoadData(speciesData.SpeciesData):
 
         data_dir_root=os.path.join(self.get_species_dir(), constants.HOST_DATA_DIR)
 
-        instance = GalaxyInstance(url=self.instance_url,
-                                              email=self.config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
-                                              password=self.config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD]
-                                              )
+        gio = GalaxyInstance(url=self.instance_url,
+                             email=self.config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
+                             password=self.config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD]
+                             )
 
         logging.info("Looking for project data in %s" % data_dir_root)
         folders = dict()
@@ -129,20 +78,20 @@ class LoadData(speciesData.SpeciesData):
 
         if folders:
             # Delete pre-existing lib (probably created by a previous call)
-            existing = instance.libraries.get_previews(name='Project Data')
+            existing = gio.libraries.get_previews(name=constants.GALAXY_LIBRARY_NAME)
             for lib in existing:
                 if not lib.deleted:
-                    logging.info('Pre-existing "Project Data" library %s found, removing it' % lib.id)
-                    instance.libraries.delete(lib.id)
+                    logging.info('Pre-existing {0} library {1} found, removing it'.format(constants.GALAXY_LIBRARY_NAME, lib.id))
+                    gio.libraries.delete(lib.id)
 
-            logging.info("Creating new 'Project Data' library")
-            prj_lib = instance.libraries.create('Project Data', 'Data for current genome annotation project')
-            self.library_id = prj_lib.id  # project data folder/library
-            logging.info("Library for {0}: {1}".format(self.full_name, self.library_id))
+            logging.info("Creating new %s library" % constants.GALAXY_LIBRARY_NAME)
+            prj_lib = gio.libraries.create(constants.GALAXY_LIBRARY_NAME, constants.GALAXY_LIBRARY_DESC)
+            library_id = prj_lib.id  # project data folder/library
+            logging.info("Library for {0}: {1}".format(self.full_name, library_id))
 
             for fname, files in folders.items():
                 if fname and files:
-                    folder_name = re.sub(data_dir_root + "/", "", fname)
+                    folder_name = re.sub(re.compile(data_dir_root + "/"), "", str(fname))
                     logging.info("Creating folder: %s" % folder_name)
                     folder = self.create_deep_folder(prj_lib, folder_name)
 
@@ -280,30 +229,33 @@ class LoadData(speciesData.SpeciesData):
                 logging.info("Did not find metadata in %s " % meta_file)
             return self.get_bam_label(dirname, bam_file)
 
-    def create_galaxy_instance(self):
-        """
-        Test the connection to the galaxy instance for the current organism
-        Exit if we cannot connect to the instance
-
-        """
-
-        logging.info("Connecting to the galaxy instance (%s)" % self.instance_url)
-        self.instance = galaxy.GalaxyInstance(url=self.instance_url,
-                                              email=self.config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
-                                              password=self.config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD]
-                                              )
+def remove_homo_sapiens_from_db(instance, history_id):
+    """
+    Run the GMOD tool to remove the "Homo sapiens" default organism from the original database
+    Will do nothing if H. sapiens isn't in the database
 
-        try:
-            self.instance.histories.get_histories()
-        except bioblend.ConnectionError:
-            logging.critical("Cannot connect to galaxy instance (%s) " % self.instance_url)
-            sys.exit()
-        else:
-            logging.info("Successfully connected to galaxy instance (%s) " % self.instance_url)
+    """
 
-        return self.instance
+    logging.debug("Getting 'Homo sapiens' ID in chado database")
+    get_sapiens_id_json_output = utilities_bioblend.run_tool_and_download_single_output_dataset(
+        instance,
+        tool_id=constants.GET_ORGANISMS_TOOL, # If this version is not found, Galaxy will use the one that is installed
+        history_id=history_id,
+        tool_inputs={"genus": "Homo", "species": "sapiens"})
+
+    logging.info("Deleting 'Homo sapiens' from the instance's chado database")
+    try:
+        get_sapiens_id_final_output = json.loads(get_sapiens_id_json_output)[0]
+        sapiens_id = str(get_sapiens_id_final_output["organism_id"])  # needs to be str to be recognized by the chado tool
+        utilities_bioblend.run_tool(
+            instance,
+            tool_id=constants.DELETE_ORGANISMS_TOOL,
+            history_id=history_id,
+            tool_inputs={"organism": sapiens_id})
+    except IndexError:
+        logging.error("Homo sapiens isn't in the instance's chado database (IndexError)")
+        pass
 
-    
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Load data into Galaxy library")
 
@@ -315,6 +267,10 @@ if __name__ == "__main__":
                         help="Increase output verbosity",
                         action="store_true")
 
+    parser.add_argument("-vv", "--very_verbose",
+                        help="Increase output verbosity, including urllib3 and bioblend debug logs",
+                        action="store_true")
+
     parser.add_argument("--config",
                         type=str,
                         help="Config path, default to 'examples/config.yml'")
@@ -325,11 +281,15 @@ if __name__ == "__main__":
 
     args = parser.parse_args()
 
-    if args.verbose:
+    if args.verbose or args.very_verbose:
         logging.basicConfig(level=logging.DEBUG)
     else:
         logging.basicConfig(level=logging.INFO)
 
+    if not args.very_verbose:
+        logging.getLogger("urllib3").setLevel(logging.INFO)
+        logging.getLogger("bioblend").setLevel(logging.INFO)
+
     # Parsing the config file if provided, using the default config otherwise
     if args.config:
         config_file = os.path.abspath(args.config)
@@ -364,30 +324,37 @@ if __name__ == "__main__":
         # Parse the config yaml file
         load_data_for_current_species.config = config
         # Set the instance url attribute -- Does not work with localhost on scratch (ALB)
-        load_data_for_current_species.instance_url = "http://localhost:{0}/sp/{1}_{2}/galaxy/".format(
+        load_data_for_current_species.instance_url = "http://localhost:{0}/sp/{1}/galaxy/".format(
                 load_data_for_current_species.config[constants.CONF_ALL_HTTP_PORT],
-                load_data_for_current_species.genus_lowercase,
-                load_data_for_current_species.species)
+                load_data_for_current_species.genus_species)
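+        # e.g. "http://localhost:80/sp/citrus_sinensis/galaxy/" with the example config and input files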
 
         # Check the galaxy container state and proceed if the galaxy services are up and running
-        if utilities.check_galaxy_state(genus_lowercase=load_data_for_current_species.genus_lowercase,
-                                        species=load_data_for_current_species.species,
+        if utilities_bioblend.check_galaxy_state(network_name=load_data_for_current_species.genus_species,
                                         script_dir=load_data_for_current_species.script_dir):
 
             # Create the Galaxy instance
-            load_data_for_current_species.instance = load_data_for_current_species.create_galaxy_instance()
+            load_data_for_current_species.instance = utilities_bioblend.get_galaxy_instance(
+                instance_url=load_data_for_current_species.instance_url,
+                email=load_data_for_current_species.config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
+                password=load_data_for_current_species.config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD]
+            )
 
             # Load the datasets into a galaxy library
             logging.info("Setting up library for {0} {1}".format(load_data_for_current_species.genus, load_data_for_current_species.species))
             load_data_for_current_species.setup_library()
             logging.debug("Successfully set up library in galaxy for {0} {1}".format(load_data_for_current_species.genus, load_data_for_current_species.species))
 
-            # Set or get the history for the current organism
-            load_data_for_current_species.get_history()
-            
+            # Get default history
+            history_id = utilities_bioblend.get_history(
+                instance=load_data_for_current_species.instance,
+                history_name="Unnamed history")
+
             # Remove H. sapiens from database if here
             # TODO: set a dedicated history for removing H. sapiens (instead of doing it into a species history)
-            load_data_for_current_species.remove_homo_sapiens_from_db()
+            remove_homo_sapiens_from_db(
+                instance=load_data_for_current_species.instance,
+                history_id=history_id
+            )
 
             # logging.info("Importing datasets into history for %s" % load_data_for_current_species.full_name)
             # load_data_for_current_species.import_datasets_into_history()  # Option "--load-history"
diff --git a/gga_run_workflow_phaeo_blast_interpro.py b/gga_run_workflow_phaeo_blast_interpro.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddbc5838adb6ee3ffa6bb73aca0e40ad7dc502e0
--- /dev/null
+++ b/gga_run_workflow_phaeo_blast_interpro.py
@@ -0,0 +1,1667 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import bioblend.galaxy.objects
+import argparse
+import os
+import logging
+import sys
+import json
+import time
+
+from bioblend import galaxy
+
+import utilities
+import utilities_bioblend
+import speciesData
+import constants
+import constants_phaeo
+import runWorkflowPhaeo
+
+class OrgWorkflowParamJbrowse(runWorkflowPhaeo.OrgWorkflowParam):
+
+    def __init__(self, genus_species, strain_sex, genus_uppercase, chado_species_name, full_name, species_folder_name,
+                 org_id, history_id, instance, genome_analysis_id=None, ogs_analysis_id=None, blastp_analysis_id=None, interpro_analysis_id=None,
+                 genome_hda_id=None, gff_hda_id=None, transcripts_hda_id=None, proteins_hda_id=None, blastp_hda_id=None, blastx_hda_id=None, interproscan_hda_id=None):
+        self.genus_species = genus_species
+        self.strain_sex = strain_sex
+        self.genus_uppercase = genus_uppercase
+        self.chado_species_name = chado_species_name
+        self.full_name = full_name
+        self.species_folder_name = species_folder_name
+        self.org_id = org_id
+        self.genome_analysis_id = genome_analysis_id
+        self.ogs_analysis_id = ogs_analysis_id
+        self.blastp_analysis_id = blastp_analysis_id
+        self.interpro_analysis_id = interpro_analysis_id
+        self.history_id = history_id
+        self.instance = instance
+        self.genome_hda_id = genome_hda_id
+        self.gff_hda_id = gff_hda_id
+        self.transcripts_hda_id = transcripts_hda_id
+        self.proteins_hda_id = proteins_hda_id
+        self.blastp_hda_id = blastp_hda_id
+        self.blastx_hda_id = blastx_hda_id
+        self.interproscan_hda_id = interproscan_hda_id
+        super().__init__(genus_species, strain_sex, genus_uppercase, chado_species_name, full_name, species_folder_name,
+                 org_id, history_id, instance)
+
+    def check_param_for_workflow_load_fasta_gff_jbrowse(self):
+        params = [self.genus_species, self.strain_sex, self.genus_uppercase,
+                  self.chado_species_name, self.full_name,
+                  self.species_folder_name, self.org_id,
+                  self.history_id, self.instance,
+                  self.genome_analysis_id, self.ogs_analysis_id,
+                  self.genome_hda_id, self.gff_hda_id, self.transcripts_hda_id, self.proteins_hda_id]
+        utilities_bioblend.check_wf_param(self.full_name, params)
+
+    def check_param_for_workflow_blastp(self):
+        params = [self.genus_species, self.strain_sex, self.genus_uppercase,
+                  self.chado_species_name, self.full_name,
+                  self.species_folder_name, self.org_id,
+                  self.history_id, self.instance,
+                  self.blastp_analysis_id,
+                  self.blastp_hda_id]
+        utilities_bioblend.check_wf_param(self.full_name, params)
+
+    def check_param_for_workflow_interpro(self):
+        params = [self.genus_species, self.strain_sex, self.genus_uppercase,
+                  self.chado_species_name, self.full_name,
+                  self.species_folder_name, self.org_id,
+                  self.history_id, self.instance,
+                  self.interpro_analysis_id,
+                  self.interproscan_hda_id]
+        utilities_bioblend.check_wf_param(self.full_name, params)
+
+
+class RunWorkflow(speciesData.SpeciesData):
+    """
+    Run a workflow into the galaxy instance's history of a given species
+
+
+    This script is made to work for a Phaeoexplorer-specific workflow, but can be adapted to run any workflow,
+    provided the user creates their own workflow in .ga format and changes the set_parameters function
+    to provide the correct parameters for their workflow
+
+    """
+
+    def __init__(self, parameters_dictionary):
+
+        super().__init__(parameters_dictionary)
+
+        self.chado_species_name = " ".join(utilities.filter_empty_not_empty_items(
+            [self.species, self.strain, self.sex])["not_empty"])
+
+        self.abbreviation = self.genus_uppercase[0] + ". " + self.chado_species_name
+
+        self.common = self.name
+        if self.common_name is not None and self.common_name != "":
+            self.common = self.common_name
+
+        self.history_name = str(self.genus_species)
+
+        self.genome_analysis_name = "genome v{0} of {1}".format(self.genome_version, self.full_name)
+        self.genome_analysis_programversion = "genome v{0}".format(self.genome_version)
+        self.genome_analysis_sourcename = self.full_name
+
+        self.ogs_analysis_name = "OGS{0} of {1}".format(self.ogs_version, self.full_name)
+        self.ogs_analysis_programversion = "OGS{0}".format(self.ogs_version)
+        self.ogs_analysis_sourcename = self.full_name
+
+        self.genome_hda_id = None
+        self.gff_hda_id = None
+        self.transcripts_hda_id = None
+        self.proteins_hda_id = None
+        self.blastp_hda_id = None
+        self.blastx_hda_id = None
+        self.interproscan_hda_id = None
+
+    def install_changesets_revisions_for_individual_tools(self):
+        """
+        Verify that the tools called outside workflows are installed with the correct versions and changesets.
+        If a version doesn't match, install the correct version + changeset in the instance.
+        Does nothing if the versions already match.
+
+        :return:
+        """
+
+        logging.info("Validating installed individual tools versions and changesets")
+
+        # Verify that the add_organism and add_analysis versions are correct in the instance
+        # the changeset for 2.3.4+galaxy0 has to be found manually because there is no way to get the wanted changeset of a non-installed tool via bioblend,
+        # except for workflows (.ga), which already contain the changeset revisions inside the step ids
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.GET_ORGANISMS_TOOL_ID,
+                                                       version=constants_phaeo.GET_ORGANISMS_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.GET_ORGANISMS_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.GET_ANALYSES_TOOL_ID,
+                                                       version=constants_phaeo.GET_ANALYSES_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.GET_ANALYSES_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ADD_ORGANISM_TOOL_ID,
+                                                       version=constants_phaeo.ADD_ORGANISM_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ADD_ORGANISM_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ADD_ANALYSIS_TOOL_ID,
+                                                       version=constants_phaeo.ADD_ANALYSIS_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ADD_ANALYSIS_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ANALYSIS_SYNC_TOOL_ID,
+                                                       version=constants_phaeo.ANALYSIS_SYNC_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ANALYSIS_SYNC_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ORGANISM_SYNC_TOOL_ID,
+                                                       version=constants_phaeo.ORGANISM_SYNC_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ORGANISM_SYNC_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        logging.info("Success: individual tools versions and changesets validated")
+
+    def add_analysis(self, name, programversion, sourcename):
+
+        add_analysis_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+            instance=self.instance,
+            tool_id=constants_phaeo.ADD_ANALYSIS_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={"name": name,
+                         "program": constants_phaeo.ADD_ANALYSIS_TOOL_PARAM_PROGRAM,
+                         "programversion": programversion,
+                         "sourcename": sourcename,
+                         "date_executed": constants_phaeo.ADD_ANALYSIS_TOOL_PARAM_DATE})
+        analysis_dict = json.loads(add_analysis_tool_dataset)
+        analysis_id = str(analysis_dict["analysis_id"])
+
+        return analysis_id
+
+    def sync_analysis(self, analysis_id):
+
+        time.sleep(60)
+        utilities_bioblend.run_tool(
+            instance=self.instance,
+            tool_id=constants_phaeo.ANALYSIS_SYNC_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={"analysis_id": analysis_id})
+
+    def add_organism_and_sync(self):
+
+        get_organisms_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+            instance=self.instance,
+            tool_id=constants_phaeo.GET_ORGANISMS_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={},
+            time_sleep=10
+        )
+        organisms_dict_list = json.loads(get_organisms_tool_dataset)  # Turn the dataset into a list for parsing
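+        # Each entry is expected to look roughly like (assumed output of the chado get_organisms tool):
+        #   {"organism_id": 4, "genus": "Genus", "species": "species strain sex", ...}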
+
+        org_id = None
+
+        # Look up list of outputs (dictionaries)
+        for org_dict in organisms_dict_list:
+            if org_dict["genus"] == self.genus_uppercase and org_dict["species"] == self.chado_species_name:
+                org_id = str(org_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
+
+        if org_id is None:
+            add_organism_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+                instance=self.instance,
+                tool_id=constants_phaeo.ADD_ORGANISM_TOOL_ID,
+                history_id=self.history_id,
+                tool_inputs={"abbr": self.abbreviation,
+                             "genus": self.genus_uppercase,
+                             "species": self.chado_species_name,
+                             "common": self.common})
+            organism_dict = json.loads(add_organism_tool_dataset)
+            org_id = str(organism_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
+
+        # Synchronize newly added organism in Tripal
+        logging.info("Synchronizing organism %s in Tripal" % self.full_name)
+        time.sleep(60)
+        utilities_bioblend.run_tool(
+            instance=self.instance,
+            tool_id=constants_phaeo.ORGANISM_SYNC_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={"organism_id": org_id})
+
+        return org_id
+
+    def get_analyses(self):
+
+        get_analyses_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+            instance=self.instance,
+            tool_id=constants_phaeo.GET_ANALYSES_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={},
+            time_sleep=10
+        )
+        analyses_dict_list = json.loads(get_analyses_tool_dataset)
+        return analyses_dict_list
+
+    def add_analysis_and_sync(self, analyses_dict_list, analysis_name, analysis_programversion, analysis_sourcename):
+        """
+        Add one analysis to the Chado database (if it doesn't already exist) and synchronize it in Tripal
+        Required for the Chado Load Tripal Synchronize workflow (which should be run as the first workflow)
+        Called outside the workflow for practical reasons (the Chado add tool doesn't have an input link for analysis or organism)
+        """
+
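+        # A minimal usage sketch (assumed), e.g. for the genome analysis defined in __init__:
+        #   genome_analysis_id = self.add_analysis_and_sync(self.get_analyses(), self.genome_analysis_name,
+        #                                                   self.genome_analysis_programversion, self.genome_analysis_sourcename)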
+        analysis_id = None
+
+        # Look up list of outputs (dictionaries)
+        for analyses_dict in analyses_dict_list:
+            if analyses_dict["name"] == analysis_name:
+                analysis_id = str(analyses_dict["analysis_id"])
+
+        if analysis_id is None:
+            analysis_id = self.add_analysis(
+                name=analysis_name,
+                programversion=analysis_programversion,
+                sourcename=analysis_sourcename
+            )
+
+        # Synchronize analysis in Tripal
+        logging.info("Synchronizing analysis %s in Tripal" % analysis_name)
+        self.sync_analysis(analysis_id=analysis_id)
+
+        return analysis_id
+
+    def add_organism_blastp_analysis(self):
+        """
+        Add the organism and the Diamond blastp analysis to the Chado database
+        Required for the Chado Load Tripal Synchronize workflow (which should be run as the first workflow)
+        Called outside the workflow for practical reasons (the Chado add tool doesn't have an input link for analysis or organism)
+
+        :return:
+
+        """
+
+        self.set_galaxy_instance()
+        self.set_history()
+
+        tool_version = "2.3.4+galaxy0"
+
+        get_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0")
+
+        get_organisms = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/%s" % tool_version,
+            history_id=self.history_id,
+            tool_inputs={})
+
+        time.sleep(10)  # Ensure the tool has had time to complete
+        org_outputs = get_organisms["outputs"]  # Outputs from the get_organism tool
+        org_job_out_id = org_outputs[0]["id"]  # ID of the get_organism output dataset (list of dicts)
+        org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)  # Download the dataset
+        org_output = json.loads(org_json_output)  # Turn the dataset into a list for parsing
+
+        org_id = None
+
+        # Look up list of outputs (dictionaries)
+        for organism_output_dict in org_output:
+            if organism_output_dict["genus"] == self.genus and organism_output_dict["species"] == "{0} {1}".format(self.species, self.sex):
+                correct_organism_id = str(organism_output_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
+                org_id = str(correct_organism_id)
+
+
+        if org_id is None:
+            add_org_job = self.instance.tools.run_tool(
+                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/%s" % tool_version,
+                history_id=self.history_id,
+                tool_inputs={"abbr": self.abbreviation,
+                             "genus": self.genus_uppercase,
+                             "species": self.chado_species_name,
+                             "common": self.common})
+            org_job_out_id = add_org_job["outputs"][0]["id"]
+            org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)
+            org_output = json.loads(org_json_output)
+            org_id = str(org_output["organism_id"])  # id needs to be a str to be recognized by chado tools
+
+            # Synchronize newly added organism in Tripal
+            logging.info("Synchronizing organism %s in Tripal" % self.full_name)
+            time.sleep(60)
+            org_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0",
+                                                    history_id=self.history_id,
+                                                    tool_inputs={"organism_id": org_id})
+
+
+        get_analyses = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/%s" % tool_version,
+            history_id=self.history_id,
+            tool_inputs={})
+
+        time.sleep(10)
+        analysis_outputs = get_analyses["outputs"]
+        analysis_job_out_id = analysis_outputs[0]["id"]
+        analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
+        analysis_output = json.loads(analysis_json_output)
+
+        blastp_analysis_id = None
+
+        # Look up list of outputs (dictionaries)
+        for analysis_output_dict in analysis_output:
+            if analysis_output_dict["name"] == "Diamond on " + self.full_name_lowercase + " OGS" + self.ogs_version:
+                blastp_analysis_id = str(analysis_output_dict["analysis_id"])
+
+
+        if blastp_analysis_id is None:
+            add_blast_analysis_job = self.instance.tools.run_tool(
+                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/%s" % tool_version,
+                history_id=self.history_id,
+                tool_inputs={"name": "Diamond on " + self.full_name_lowercase + " OGS" + self.ogs_version,
+                             "program": "Performed by Genoscope",
+                             "programversion": str(self.sex + " OGS" + self.ogs_version),
+                             "sourcename": "Genoscope",
+                             "date_executed": self.date})
+            analysis_outputs = add_blast_analysis_job["outputs"]
+            analysis_job_out_id = analysis_outputs[0]["id"]
+            analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
+            analysis_output = json.loads(analysis_json_output)
+            blastp_analysis_id = str(analysis_output["analysis_id"])
+
+        # Synchronize blastp analysis
+        logging.info("Synchronizing Diamond blastp OGS%s analysis in Tripal" % self.ogs_version)
+        time.sleep(60)
+        blastp_analysis_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
+                                                            history_id=self.history_id,
+                                                            tool_inputs={"analysis_id": blastp_analysis_id})
+
+        # print({"org_id": org_id, "genome_analysis_id": genome_analysis_id, "ogs_analysis_id": ogs_analysis_id})
+        return {"org_id": org_id, "blastp_analysis_id": blastp_analysis_id}
+
+    def add_organism_interproscan_analysis(self):
+        """
+        Add the organism and the InterproScan analysis to the Chado database
+        Required for the Chado Load Tripal Synchronize workflow (which should be run as the first workflow)
+        Called outside the workflow for practical reasons (the Chado add tool doesn't have an input link for analysis or organism)
+
+        :return:
+
+        """
+
+        self.set_galaxy_instance()
+        self.set_history()
+
+        tool_version = "2.3.4+galaxy0"
+
+        get_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0")
+
+        get_organisms = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/%s" % tool_version,
+            history_id=self.history_id,
+            tool_inputs={})
+
+        time.sleep(10)  # Ensure the tool has had time to complete
+        org_outputs = get_organisms["outputs"]  # Outputs from the get_organism tool
+        org_job_out_id = org_outputs[0]["id"]  # ID of the get_organism output dataset (list of dicts)
+        org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)  # Download the dataset
+        org_output = json.loads(org_json_output)  # Turn the dataset into a list for parsing
+
+        org_id = None
+
+        # Look up list of outputs (dictionaries)
+        for organism_output_dict in org_output:
+            if organism_output_dict["genus"] == self.genus and organism_output_dict["species"] == "{0} {1}".format(self.species, self.sex):
+                correct_organism_id = str(organism_output_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
+                org_id = str(correct_organism_id)
+
+
+        if org_id is None:
+            add_org_job = self.instance.tools.run_tool(
+                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/%s" % tool_version,
+                history_id=self.history_id,
+                tool_inputs={"abbr": self.abbreviation,
+                             "genus": self.genus_uppercase,
+                             "species": self.chado_species_name,
+                             "common": self.common})
+            org_job_out_id = add_org_job["outputs"][0]["id"]
+            org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)
+            org_output = json.loads(org_json_output)
+            org_id = str(org_output["organism_id"])  # id needs to be a str to be recognized by chado tools
+
+            # Synchronize newly added organism in Tripal
+            logging.info("Synchronizing organism %s in Tripal" % self.full_name)
+            time.sleep(60)
+            org_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0",
+                                                    history_id=self.history_id,
+                                                    tool_inputs={"organism_id": org_id})
+
+
+        get_analyses = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/%s" % tool_version,
+            history_id=self.history_id,
+            tool_inputs={})
+
+        time.sleep(10)
+        analysis_outputs = get_analyses["outputs"]
+        analysis_job_out_id = analysis_outputs[0]["id"]
+        analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
+        analysis_output = json.loads(analysis_json_output)
+
+        interpro_analysis_id = None
+
+        # Look up list of outputs (dictionaries)
+        for analysis_output_dict in analysis_output:
+            if analysis_output_dict["name"] == "Interproscan on " + self.full_name_lowercase + " OGS" + self.ogs_version:
+                interpro_analysis_id = str(analysis_output_dict["analysis_id"])
+
+
+        if interpro_analysis_id is None:
+            add_interproscan_analysis_job = self.instance.tools.run_tool(
+                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/%s" % tool_version,
+                history_id=self.history_id,
+                tool_inputs={"name": "Interproscan on " + self.full_name_lowercase + " OGS" + self.ogs_version,
+                             "program": "Performed by Genoscope",
+                             "programversion": str(self.sex + " OGS" + self.ogs_version),
+                             "sourcename": "Genoscope",
+                             "date_executed": self.date})
+            analysis_outputs = add_interproscan_analysis_job["outputs"]
+            analysis_job_out_id = analysis_outputs[0]["id"]
+            analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
+            analysis_output = json.loads(analysis_json_output)
+            interpro_analysis_id = str(analysis_output["analysis_id"])
+
+        # Synchronize InterproScan analysis
+        logging.info("Synchronizing InterproScan OGS%s analysis in Tripal" % self.ogs_version)
+        time.sleep(60)
+        interproscan_analysis_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
+                                                            history_id=self.history_id,
+                                                            tool_inputs={"analysis_id": interpro_analysis_id})
+
+        # print({"org_id": org_id, "genome_analysis_id": genome_analysis_id, "ogs_analysis_id": ogs_analysis_id})
+        return {"org_id": org_id, "interpro_analysis_id": interpro_analysis_id}
+
+
+    def get_interpro_analysis_id(self):
+        """
+        Retrieve the InterproScan analysis ID from the instance's Chado database, exiting if no matching analysis is found
+        """
+
+        # Get interpro ID
+        interpro_analysis = self.instance.tools.run_tool(
+            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/2.3.4+galaxy0",
+            history_id=self.history_id,
+            tool_inputs={"name": "InterproScan on OGS%s" % self.ogs_version})
+        interpro_analysis_job_out = interpro_analysis["outputs"][0]["id"]
+        interpro_analysis_json_output = self.instance.datasets.download_dataset(dataset_id=interpro_analysis_job_out)
+        try:
+            interpro_analysis_output = json.loads(interpro_analysis_json_output)[0]
+            self.interpro_analysis_id = str(interpro_analysis_output["analysis_id"])
+        except IndexError as exc:
+            logging.critical("No matching InterproScan analysis exists in the instance's chado database")
+            sys.exit(exc)
+
+        return self.interpro_analysis_id
+
+
+    def get_invocation_report(self, workflow_name):
+        """
+        Debugging method for workflows
+
+        Simply logs and returns a report of the previous workflow invocation (execution of a workflow in
+        the instance via the API)
+
+        :param workflow_name:
+        :return:
+        """
+
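+        # e.g. (assumed call) report = self.get_invocation_report(workflow_name=constants_phaeo.WF_LOAD_GFF_JB)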
+        workflow_attributes = self.instance.workflows.get_workflows(name=workflow_name)
+        workflow_id = workflow_attributes[1]["id"]  # Most recently imported workflow (index 1 in the list)
+        invocations = self.instance.workflows.get_invocations(workflow_id=workflow_id)
+        invocation_id = invocations[1]["id"]  # Most recent invocation
+        invocation_report = self.instance.invocations.get_invocation_report(invocation_id=invocation_id)
+
+        logging.debug(invocation_report)
+
+        return invocation_report
+
+    def import_datasets_into_history(self):
+        """
+        Find datasets in a library, get their ID and import them into the current history if they are not already
+
+        :return:
+        """
+
+        genome_ldda_id = None
+        transcripts_ldda_id = None
+        proteins_ldda_id = None
+        gff_ldda_id = None
+        interpro_ldda_id = None
+        blastp_ldda_id = None
+        blastx_ldda_id = None
+
+        genome_hda_id = None
+        gff_hda_id = None
+        transcripts_hda_id = None
+        proteins_hda_id = None
+        blastp_hda_id = None
+        blastx_hda_id = None
+        interproscan_hda_id = None
+
+        folder_dict_list = self.instance.libraries.get_folders(library_id=str(self.library_id))
+
+        folders_id_dict = {}
+
+        # Loop over the folders in the library and map folders names to their IDs
+        for folder_dict in folder_dict_list:
+            folders_id_dict[folder_dict["name"]] = folder_dict["id"]
+
+        # Iterating over the folders to find datasets and map datasets to their IDs
+        for folder_name, folder_id in folders_id_dict.items():
+            if folder_name == "/genome/{0}/v{1}".format(self.species_folder_name, self.genome_version):
+                sub_folder_content = self.instance.folders.show_folder(folder_id=folder_id, contents=True)
+                for value in sub_folder_content.values():
+                    for e in value:
+                        if type(e) == dict:
+                            if e["name"].endswith(self.genome_filename):
+                                genome_ldda_id = e["ldda_id"]
+
+            if folder_name == "/annotation/{0}/OGS{1}".format(self.species_folder_name, self.ogs_version):
+                sub_folder_content = self.instance.folders.show_folder(folder_id=folder_id, contents=True)
+                for value in sub_folder_content.values():
+                    for e in value:
+                        if isinstance(e, dict):
+                            ldda_name = e["name"]
+                            ldda_id = e["ldda_id"]
+                            if ldda_name.endswith(self.transcripts_filename):
+                                transcripts_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.proteins_filename):
+                                proteins_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.gff_filename):
+                                gff_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.interpro_filename):
+                                interpro_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.blastp_filename):
+                                blastp_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.blastx_filename):
+                                blastx_ldda_id = ldda_id
+
+        hda_list = self.instance.datasets.get_datasets(self.history_id)
+        # Finding datasets in history (matching datasets names)
+        for hda in hda_list:
+            hda_name = hda["name"]
+            hda_id = hda["id"]
+            if hda_name == self.genome_filename:
+                genome_hda_id = hda_id
+            if hda_name == self.gff_filename:
+                gff_hda_id = hda_id
+            if hda_name == self.transcripts_filename:
+                transcripts_hda_id = hda_id
+            if hda_name == self.proteins_filename:
+                proteins_hda_id = hda_id
+            if hda_name == self.blastp_filename:
+                blastp_hda_id = hda_id
+            if hda_name == self.blastx_filename:
+                blastx_hda_id = hda_id
+            if hda_name == self.interpro_filename:
+                interproscan_hda_id = hda_id
+
+        # Import each dataset into history if it is not imported
+        logging.debug("Uploading datasets into history %s" % self.history_id)
+
+        if genome_hda_id is None:
+            genome_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=genome_ldda_id)
+            genome_hda_id = genome_dataset_upload["id"]
+        if gff_hda_id is None:
+            gff_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=gff_ldda_id)
+            gff_hda_id = gff_dataset_upload["id"]
+        if proteins_hda_id is None:
+            proteins_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=proteins_ldda_id)
+            proteins_hda_id = proteins_dataset_upload["id"]
+        if transcripts_hda_id is None:
+            transcripts_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=transcripts_ldda_id)
+            transcripts_hda_id = transcripts_dataset_upload["id"]
+        if interproscan_hda_id is None:
+            try:
+                interproscan_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=interpro_ldda_id)
+                interproscan_hda_id = interproscan_dataset_upload["id"]
+            except Exception as exc:
+                logging.debug("Interproscan file not found in library (history: {0})".format(self.history_id))
+        if blastp_hda_id is None:
+            try:
+                blastp_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=blastp_ldda_id)
+                blastp_hda_id = blastp_dataset_upload["id"]
+            except Exception as exc:
+                logging.debug("blastp file not found in library (history: {0})".format(self.history_id))
+        if blastx_hda_id is None:
+            try:
+                blastx_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=blastx_ldda_id)
+                blastx_hda_id = blastx_dataset_upload["id"]
+            except Exception as exc:
+                logging.debug("blastp file not found in library (history: {0})".format(self.history_id))
+
+        self.genome_hda_id = genome_hda_id
+        self.gff_hda_id = gff_hda_id
+        self.transcripts_hda_id = transcripts_hda_id
+        self.proteins_hda_id = proteins_hda_id
+        self.blastp_hda_id = blastp_hda_id
+        self.blastx_hda_id = blastx_hda_id
+        self.interproscan_hda_id = interproscan_hda_id
+
+def get_sp_workflow_param(sp_dict, main_dir, config, workflow_type):
+    """
+    """
+
+    run_workflow_for_current_organism = RunWorkflow(parameters_dictionary=sp_dict)
+
+    # Verifying the galaxy container is running
+    if not utilities_bioblend.check_galaxy_state(network_name=run_workflow_for_current_organism.genus_species,
+                                    script_dir=run_workflow_for_current_organism.script_dir):
+        logging.critical("The galaxy container for %s is not ready yet!" % run_workflow_for_current_organism.genus_species)
+        sys.exit()
+
+    else:
+
+        # Setting some of the instance attributes
+        run_workflow_for_current_organism.main_dir = main_dir
+        run_workflow_for_current_organism.species_dir = os.path.join(run_workflow_for_current_organism.main_dir,
+                                                                     run_workflow_for_current_organism.genus_species +
+                                                                     "/")
+
+        # Parse the config yaml file
+        run_workflow_for_current_organism.config = config
+        # Set the instance url attribute --> TODO: the localhost rule in the docker-compose still doesn't work on scratchgmodv1
+        run_workflow_for_current_organism.instance_url = "http://localhost:{0}/sp/{1}/galaxy/".format(
+            run_workflow_for_current_organism.config[constants.CONF_ALL_HTTP_PORT],
+            run_workflow_for_current_organism.genus_species)
+
+        run_workflow_for_current_organism.instance = utilities_bioblend.get_galaxy_instance(
+            instance_url=run_workflow_for_current_organism.instance_url,
+            email=run_workflow_for_current_organism.config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
+            password=run_workflow_for_current_organism.config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD],
+        )
+        history_id = utilities_bioblend.get_history(
+            instance=run_workflow_for_current_organism.instance,
+            history_name=run_workflow_for_current_organism.history_name)
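+        # Install the changeset revisions required by the individual tools called outside of the workflows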
+        run_workflow_for_current_organism.install_changesets_revisions_for_individual_tools()
+
+        if workflow_type == constants_phaeo.WF_LOAD_GFF_JB:
+
+            analyses_dict_list = run_workflow_for_current_organism.get_analyses()
+
+            org_id = run_workflow_for_current_organism.add_organism_and_sync()
+            genome_analysis_id = run_workflow_for_current_organism.add_analysis_and_sync(
+                analyses_dict_list=analyses_dict_list,
+                analysis_name=run_workflow_for_current_organism.genome_analysis_name,
+                analysis_programversion=run_workflow_for_current_organism.genome_analysis_programversion,
+                analysis_sourcename=run_workflow_for_current_organism.genome_analysis_sourcename
+            )
+            ogs_analysis_id = run_workflow_for_current_organism.add_analysis_and_sync(
+                analyses_dict_list=analyses_dict_list,
+                analysis_name=run_workflow_for_current_organism.ogs_analysis_name,
+                analysis_programversion=run_workflow_for_current_organism.ogs_analysis_programversion,
+                analysis_sourcename=run_workflow_for_current_organism.ogs_analysis_sourcename
+            )
+
+            run_workflow_for_current_organism.import_datasets_into_history()
+
+            # Create the StrainWorkflowParam object holding all attributes needed for the workflow
+            sp_wf_param = OrgWorkflowParamJbrowse(
+                genus_species=run_workflow_for_current_organism.genus_species,
+                strain_sex=run_workflow_for_current_organism.strain_sex,
+                genus_uppercase=run_workflow_for_current_organism.genus_uppercase,
+                full_name=run_workflow_for_current_organism.full_name,
+                species_folder_name=run_workflow_for_current_organism.species_folder_name,
+                chado_species_name=run_workflow_for_current_organism.chado_species_name,
+                org_id=org_id,
+                genome_analysis_id=genome_analysis_id,
+                ogs_analysis_id=ogs_analysis_id,
+                genome_hda_id=run_workflow_for_current_organism.genome_hda_id,
+                gff_hda_id=run_workflow_for_current_organism.gff_hda_id,
+                transcripts_hda_id=run_workflow_for_current_organism.transcripts_hda_id,
+                proteins_hda_id=run_workflow_for_current_organism.proteins_hda_id,
+                blastp_hda_id=run_workflow_for_current_organism.blastp_hda_id,
+                blastx_hda_id=run_workflow_for_current_organism.blastx_hda_id,
+                interproscan_hda_id=run_workflow_for_current_organism.interproscan_hda_id,
+                history_id=history_id,
+                instance=run_workflow_for_current_organism.instance
+            )
+            sp_wf_param.check_param_for_workflow_load_fasta_gff_jbrowse()
+
+        if workflow_type == "blast":
+
+            ids = run_workflow_for_current_organism.add_organism_blastp_analysis()
+
+            org_id = ids["org_id"]
+            blastp_analysis_id = ids["blastp_analysis_id"]
+            run_workflow_for_current_organism.import_datasets_into_history()
+
+            # Create the OrgWorkflowParamJbrowse object holding all attributes needed for the workflow
+            sp_wf_param = OrgWorkflowParamJbrowse(
+                genus_species=run_workflow_for_current_organism.genus_species,
+                strain_sex=run_workflow_for_current_organism.strain_sex,
+                genus_uppercase=run_workflow_for_current_organism.genus_uppercase,
+                full_name=run_workflow_for_current_organism.full_name,
+                species_folder_name=run_workflow_for_current_organism.species_folder_name,
+                chado_species_name=run_workflow_for_current_organism.chado_species_name,
+                org_id=org_id,
+                blastp_analysis_id=blastp_analysis_id,
+                genome_hda_id=run_workflow_for_current_organism.genome_hda_id,
+                gff_hda_id=run_workflow_for_current_organism.gff_hda_id,
+                transcripts_hda_id=run_workflow_for_current_organism.transcripts_hda_id,
+                proteins_hda_id=run_workflow_for_current_organism.proteins_hda_id,
+                blastp_hda_id=run_workflow_for_current_organism.blastp_hda_id,
+                blastx_hda_id=run_workflow_for_current_organism.blastx_hda_id,
+                interproscan_hda_id=run_workflow_for_current_organism.interproscan_hda_id,
+                history_id=history_id,
+                instance=run_workflow_for_current_organism.instance
+            )
+            sp_wf_param.check_param_for_workflow_blastp()
+
+        if workflow_type == "interpro":
+
+            ids = run_workflow_for_current_organism.add_organism_interproscan_analysis()
+
+            org_id = ids["org_id"]
+            interpro_analysis_id = ids["interpro_analysis_id"]
+            run_workflow_for_current_organism.import_datasets_into_history()
+
+            # Create the OrgWorkflowParamJbrowse object holding all attributes needed for the workflow
+            sp_wf_param = OrgWorkflowParamJbrowse(
+                genus_species=run_workflow_for_current_organism.genus_species,
+                strain_sex=run_workflow_for_current_organism.strain_sex,
+                genus_uppercase=run_workflow_for_current_organism.genus_uppercase,
+                full_name=run_workflow_for_current_organism.full_name,
+                species_folder_name=run_workflow_for_current_organism.species_folder_name,
+                chado_species_name=run_workflow_for_current_organism.chado_species_name,
+                org_id=org_id,
+                interpro_analysis_id=interpro_analysis_id,
+                genome_hda_id=run_workflow_for_current_organism.genome_hda_id,
+                gff_hda_id=run_workflow_for_current_organism.gff_hda_id,
+                transcripts_hda_id=run_workflow_for_current_organism.transcripts_hda_id,
+                proteins_hda_id=run_workflow_for_current_organism.proteins_hda_id,
+                blastp_hda_id=run_workflow_for_current_organism.blastp_hda_id,
+                blastx_hda_id=run_workflow_for_current_organism.blastx_hda_id,
+                interproscan_hda_id=run_workflow_for_current_organism.interproscan_hda_id,
+                history_id=history_id,
+                instance=run_workflow_for_current_organism.instance
+            )
+            sp_wf_param.check_param_for_workflow_interpro()
+
+        return sp_wf_param
+
+
+def install_changesets_revisions_from_workflow(instance, workflow_path):
+    """
+    Read a .ga file to extract the information about the different tools called.
+    Check if every tool is installed via a "show_tool".
+    If a tool is not installed (versions don't match), send a warning to the logger and install the required changeset (matching the tool version)
+    Doesn't do anything if versions match
+
+    :return:
+    """
+
+    logging.info("Validating that installed tools versions and changesets match workflow versions")
+
+    # Load the workflow file (.ga) in a buffer
+    with open(workflow_path, 'r') as ga_in_file:
+
+        # Then store the decoded json dictionary
+        workflow_dict = json.load(ga_in_file)
+
+        # Iterate over the workflow steps and check every tool they call
+        for step in workflow_dict["steps"].values():
+            if step["tool_id"]:
+                # Check if an installed version matches the workflow tool version
+                # (If it's not installed, the show_tool version returned will be a default version with the suffix "XXXX+0")
+                utilities_bioblend.install_repository_revision(tool_id=step["tool_id"],
+                                                               version=step["tool_version"],
+                                                               changeset_revision=step["tool_shed_repository"]["changeset_revision"],
+                                                               instance=instance)
+
+    logging.info("Tools versions and changeset_revisions from workflow validated")
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run Galaxy workflows, specific to Phaeoexplorer data")
+
+    parser.add_argument("input",
+                        type=str,
+                        help="Input file (yml)")
+
+    parser.add_argument("-v", "--verbose",
+                        help="Increase output verbosity",
+                        action="store_true")
+
+    parser.add_argument("--config",
+                        type=str,
+                        help="Config path, default to the 'config' file inside the script repository")
+
+    parser.add_argument("--main-directory",
+                        type=str,
+                        help="Where the stack containers will be located, defaults to working directory")
+
+    parser.add_argument("--workflow", "-w",
+                        type=str,
+                        help="Worfklow to run. Available options: load_fasta_gff_jbrowse, blast, interpro")
+
+    args = parser.parse_args()
+
+    bioblend_logger = logging.getLogger("bioblend")
+    if args.verbose:
+        logging.basicConfig(level=logging.DEBUG)
+        bioblend_logger.setLevel(logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.INFO)
+        bioblend_logger.setLevel(logging.INFO)
+
+    # Parsing the config file if provided, using the default config otherwise
+    if args.config:
+        config_file = os.path.abspath(args.config)
+    else:
+        config_file = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), constants.DEFAULT_CONFIG)
+    config = utilities.parse_config(config_file)
+
+    main_dir = None
+    if not args.main_directory:
+        main_dir = os.getcwd()
+    else:
+        main_dir = os.path.abspath(args.main_directory)
+
+    sp_dict_list = utilities.parse_input(args.input)
+
+    workflow_type = None
+    # Checking if the user specified a (valid) workflow to run
+    if not args.workflow:
+        logging.critical("No workflow type specified, exiting")
+        sys.exit()
+    elif args.workflow in constants_phaeo.WORKFLOW_VALID_TYPES:
+        workflow_type = args.workflow
+    else:
+        logging.critical("Unknown workflow type '%s', exiting" % args.workflow)
+        sys.exit()
+    logging.info("Workflow type set to '%s'" % workflow_type)
+
+    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+    all_sp_workflow_dict = {}
+
+    if workflow_type == constants_phaeo.WF_LOAD_GFF_JB:
+        for sp_dict in sp_dict_list:
+
+            # Add and retrieve all analyses/organisms for the current input species and add their IDs to the input dictionary
+            sp_wf_param = get_sp_workflow_param(
+                sp_dict,
+                main_dir=main_dir,
+                config=config,
+                workflow_type=constants_phaeo.WF_LOAD_GFF_JB)
+
+            current_sp_genus_species = sp_wf_param.genus_species
+            current_sp_strain_sex = sp_wf_param.strain_sex
+
+            # Add the species dictionary to the complete dictionary
+            # This dictionary contains every organism present in the input file
+            # Its structure is the following:
+            # {genus species: {strain1_sex1: {variables_key: variables_values}, strain1_sex2: {variables_key: variables_values}}}
+            if current_sp_genus_species not in all_sp_workflow_dict:
+                all_sp_workflow_dict[current_sp_genus_species] = {current_sp_strain_sex: sp_wf_param}
+            elif current_sp_strain_sex not in all_sp_workflow_dict[current_sp_genus_species]:
+                all_sp_workflow_dict[current_sp_genus_species][current_sp_strain_sex] = sp_wf_param
+            else:
+                logging.error("Duplicate organism with 'genus_species' = '{0}' and 'strain_sex' = '{1}'".format(current_sp_genus_species, current_sp_strain_sex))
+
+        for species, strains in all_sp_workflow_dict.items():
+            strains_list = list(strains.keys())
+            strains_count = len(strains_list)
+
+            if strains_count == 1:
+                logging.info("Input species %s: 1 strain detected in input dictionary" % species)
+                strain_sex = list(strains.keys())[0]
+                sp_wf_param = strains[strain_sex]
+
+                # Set workflow path (1 organism)
+                workflow_path = os.path.join(os.path.abspath(script_dir), constants_phaeo.WORKFLOWS_PATH, constants_phaeo.WF_LOAD_GFF_JB_1_ORG_FILE)
+
+                # Check if the versions of tools specified in the workflow are installed in galaxy
+                install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=sp_wf_param.instance)
+
+                # Set the workflow parameters (individual tools runtime parameters in the workflow)
+                workflow_parameters = {}
+                # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GENOME] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GFF] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_PROTEINS] = {}
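+                # "do_update": "true" lets the Chado loader update the genome sequences if they were already loaded
+                # for this organism/analysis (assumed behaviour of the load_fasta tool)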
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_LOAD_FASTA] = {
+                    "organism": sp_wf_param.org_id,
+                    "analysis_id": sp_wf_param.genome_analysis_id,
+                    "do_update": "true"}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_LOAD_GFF] = {
+                    "organism": sp_wf_param.org_id,
+                    "analysis_id": sp_wf_param.ogs_analysis_id}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_FEATURE_SYNC] = {
+                    "organism_id": sp_wf_param.org_id}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_POPULATE_VIEWS] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_INDEX] = {}
+
+                # Set datamap (mapping of input files in the workflow)
+                datamap = {}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GENOME] = {"src": "hda", "id": sp_wf_param.genome_hda_id}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GFF] = {"src": "hda", "id": sp_wf_param.gff_hda_id}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_PROTEINS] = {"src": "hda", "id": sp_wf_param.proteins_hda_id}
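+                # "src": "hda" means the inputs are datasets already present in the history (imported from the library earlier)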
+
+                with open(workflow_path, 'r') as ga_in_file:
+
+                    # Store the decoded json dictionary
+                    workflow_dict = json.load(ga_in_file)
+                    workflow_name = workflow_dict["name"]
+
+                    # For the Jbrowse tool, we unfortunately have to edit the parameters directly in the workflow dictionary
+                    # instead of setting them as runtime values: runtime parameters make the tool throw an internal error ("replace not found")
+                    # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
+                    if constants.CONF_JBROWSE_MENU_URL not in config.keys():
+                        # default
+                        root_url = "https://{0}".format(config[constants.CONF_ALL_HOSTNAME])
+                    else:
+                        root_url = config[constants.CONF_JBROWSE_MENU_URL]
+                    species_strain_sex = sp_wf_param.chado_species_name.replace(" ", "-")
+                    jbrowse_menu_url = "{root_url}/sp/{genus_sp}/feature/{Genus}/{species_strain_sex}/mRNA/{id}".format(
+                        root_url=root_url,
+                        genus_sp=sp_wf_param.genus_species,
+                        Genus=sp_wf_param.genus_uppercase,
+                        species_strain_sex=species_strain_sex,
+                        id="{id}")
+
+                    # Replace values in the workflow dictionary
+                    jbrowse_tool_state = workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE]["tool_state"]
+                    jbrowse_tool_state = jbrowse_tool_state.replace("__MENU_URL_ORG__", jbrowse_menu_url)
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE]["tool_state"] = jbrowse_tool_state
+                    jb_to_container_tool_state = workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JB_TO_CONTAINER]["tool_state"]
+                    jb_to_container_tool_state = jb_to_container_tool_state\
+                        .replace("__DISPLAY_NAME_ORG__", sp_wf_param.full_name)\
+                        .replace("__UNIQUE_ID_ORG__", sp_wf_param.species_folder_name)
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JB_TO_CONTAINER]["tool_state"] = jb_to_container_tool_state
+
+                    # Import the workflow in galaxy as a dict
+                    sp_wf_param.instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+
+                    # Get its attributes
+                    workflow_attributes = sp_wf_param.instance.workflows.get_workflows(name=workflow_name)
+                    # Then get its ID (required to invoke the workflow)
+                    workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                    logging.debug("Workflow ID: %s" % workflow_id)
+                    # Check if the workflow is found
+                    try:
+                        show_workflow = sp_wf_param.instance.workflows.show_workflow(workflow_id=workflow_id)
+                    except bioblend.ConnectionError:
+                        logging.warning("Error finding workflow %s" % workflow_name)
+
+                    # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                    sp_wf_param.instance.workflows.invoke_workflow(
+                        workflow_id=workflow_id,
+                        history_id=sp_wf_param.history_id,
+                        params=workflow_parameters,
+                        inputs=datamap,
+                        allow_tool_state_corrections=True)
+
+                    logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance for the jobs state".format(workflow_name))
+
+            if strains_count == 2:
+
+                logging.info("Input organism %s: 2 species detected in input dictionary" % species)
+                strain_sex_org1 = strains_list[0]
+                strain_sex_org2 = strains_list[1]
+                sp_wf_param_org1 = strains[strain_sex_org1]
+                sp_wf_param_org2 = strains[strain_sex_org2]
+
+                # Set workflow path (2 organisms)
+                workflow_path = os.path.join(os.path.abspath(script_dir), constants_phaeo.WORKFLOWS_PATH, constants_phaeo.WF_LOAD_GFF_JB_2_ORG_FILE)
+
+                # Check if the versions of tools specified in the workflow are installed in galaxy
+                install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=sp_wf_param_org1.instance)
+
+                # Set the workflow parameters (individual tools runtime parameters in the workflow)
+                workflow_parameters = {}
+
+                # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG1] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG1] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG1] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG2] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG2] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG2] = {}
+
+                # Organism 1
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_FASTA_ORG1] = {
+                    "organism": sp_wf_param_org1.org_id,
+                    "analysis_id": sp_wf_param_org1.genome_analysis_id,
+                    "do_update": "true"}
+                # workflow_parameters[JBROWSE_ORG1] = {"jbrowse_menu_url": jbrowse_menu_url_org1}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_GFF_ORG1] = {
+                    "organism": sp_wf_param_org1.org_id,
+                    "analysis_id": sp_wf_param_org1.ogs_analysis_id}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_FEATURE_SYNC_ORG1] = {
+                    "organism_id": sp_wf_param_org1.org_id}
+                # workflow_parameters[JBROWSE_CONTAINER] = {"organisms": [{"name": org1_full_name, "unique_id": org1_species_folder_name, }, {"name": org2_full_name, "unique_id": org2_species_folder_name}]}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER] = {}
+
+                # Organism 2
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_FASTA_ORG2] = {
+                    "organism": sp_wf_param_org2.org_id,
+                    "analysis_id": sp_wf_param_org2.genome_analysis_id,
+                    "do_update": "true"}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_GFF_ORG2] = {
+                    "organism": sp_wf_param_org2.org_id,
+                    "analysis_id": sp_wf_param_org2.ogs_analysis_id}
+                # workflow_parameters[JRBOWSE_ORG2] = {"jbrowse_menu_url": jbrowse_menu_url_org2}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_FEATURE_SYNC_ORG2] = {
+                    "organism_id": sp_wf_param_org2.org_id}
+
+                # POPULATE + INDEX DATA
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS] = {}
+                workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_INDEX] = {}
+
+                # Set datamap (mapping of input files in the workflow)
+                datamap = {}
+
+                # Organism 1
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG1] = {"src": "hda", "id": sp_wf_param_org1.genome_hda_id}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG1] = {"src": "hda", "id": sp_wf_param_org1.gff_hda_id}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG1] = {"src": "hda", "id": sp_wf_param_org1.proteins_hda_id}
+
+                # Organism 2
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG2] = {"src": "hda", "id": sp_wf_param_org2.genome_hda_id}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG2] = {"src": "hda", "id": sp_wf_param_org2.gff_hda_id}
+                datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG2] = {"src": "hda", "id": sp_wf_param_org2.proteins_hda_id}
+
+                with open(workflow_path, 'r') as ga_in_file:
+
+                    # Store the decoded json dictionary
+                    workflow_dict = json.load(ga_in_file)
+                    workflow_name = workflow_dict["name"]
+
+                    # For the Jbrowse tool, we unfortunately have to edit the parameters directly in the workflow dictionary
+                    # instead of setting them as runtime values: runtime parameters make the tool throw an internal error ("replace not found")
+                    # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
+                    if constants.CONF_JBROWSE_MENU_URL not in config.keys():
+                        # default
+                        root_url = "https://{0}".format(config[constants.CONF_ALL_HOSTNAME])
+                    else:
+                        root_url = config[constants.CONF_JBROWSE_MENU_URL]
+                    species_strain_sex_org1 = sp_wf_param_org1.chado_species_name.replace(" ", "-")
+                    species_strain_sex_org2 = sp_wf_param_org2.chado_species_name.replace(" ", "-")
+                    jbrowse_menu_url_org1 = "{root_url}/sp/{genus_sp}/feature/{Genus}/{species_strain_sex}/mRNA/{id}".format(
+                        root_url=root_url,
+                        genus_sp=sp_wf_param_org1.genus_species,
+                        Genus=sp_wf_param_org1.genus_uppercase,
+                        species_strain_sex=species_strain_sex_org1,
+                        id="{id}")
+                    jbrowse_menu_url_org2 = "{root_url}/sp/{genus_sp}/feature/{Genus}/{species_strain_sex}/mRNA/{id}".format(
+                        root_url=root_url,
+                        genus_sp=sp_wf_param_org2.genus_species,
+                        Genus=sp_wf_param_org2.genus_uppercase,
+                        species_strain_sex=species_strain_sex_org2,
+                        id="{id}")
+
+                    # Replace values in the workflow dictionary
+                    jbrowse_tool_state_org1 = workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1]["tool_state"]
+                    jbrowse_tool_state_org1 = jbrowse_tool_state_org1.replace("__MENU_URL_ORG1__", jbrowse_menu_url_org1)
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1]["tool_state"] = jbrowse_tool_state_org1
+                    jbrowse_tool_state_org2 = workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2]["tool_state"]
+                    jbrowse_tool_state_org2 = jbrowse_tool_state_org2.replace("__MENU_URL_ORG2__", jbrowse_menu_url_org2)
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2]["tool_state"] = jbrowse_tool_state_org2
+                    # The UNIQUE_ID is specific to a combination genus_species_strain_sex so every combination should have its unique workflow
+                    # in galaxy --> define a naming method for these workflows
+                    jb_to_container_tool_state = workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER]["tool_state"]
+                    jb_to_container_tool_state = jb_to_container_tool_state\
+                        .replace("__DISPLAY_NAME_ORG1__", sp_wf_param_org1.full_name)\
+                        .replace("__UNIQUE_ID_ORG1__", sp_wf_param_org1.species_folder_name)\
+                        .replace("__DISPLAY_NAME_ORG2__", sp_wf_param_org2.full_name)\
+                        .replace("__UNIQUE_ID_ORG2__", sp_wf_param_org2.species_folder_name)
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER]["tool_state"] = jb_to_container_tool_state
+
+                    # Import the workflow in galaxy as a dict
+                    sp_wf_param_org1.instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+
+                    # Get its attributes
+                    workflow_attributes = sp_wf_param_org1.instance.workflows.get_workflows(name=workflow_name)
+                    # Then get its ID (required to invoke the workflow)
+                    workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                    logging.debug("Workflow ID: %s" % workflow_id)
+                    # Check if the workflow is found
+                    try:
+                        show_workflow = sp_wf_param_org1.instance.workflows.show_workflow(workflow_id=workflow_id)
+                    except bioblend.ConnectionError:
+                        logging.warning("Error finding workflow %s" % workflow_name)
+
+                    # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                    sp_wf_param_org1.instance.workflows.invoke_workflow(
+                        workflow_id=workflow_id,
+                        history_id=sp_wf_param_org1.history_id,
+                        params=workflow_parameters,
+                        inputs=datamap,
+                        allow_tool_state_corrections=True)
+
+                    logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance for the jobs state".format(workflow_name))
+
+    if workflow_type == constants_phaeo.WORKFLOW_BLAST:
+        for sp_dict in sp_dict_list:
+
+            # Add and retrieve all analyses/organisms for the current input species and add their IDs to the input dictionary
+            sp_wf_param = get_sp_workflow_param(sp_dict, main_dir=main_dir, config=config, workflow_type=constants_phaeo.WORKFLOW_BLAST)
+
+            current_sp_genus_species = list(sp_wf_param.keys())[0]
+            current_sp_genus_species_dict = list(sp_wf_param.values())[0]
+            current_sp_strain_sex = list(current_sp_genus_species_dict.keys())[0]
+            current_sp_strain_sex_attributes_dict = list(current_sp_genus_species_dict.values())[0]
+
+            # Add the species dictionary to the complete dictionary
+            # This dictionary contains every organism present in the input file
+            # Its structure is the following:
+            # {genus species: {strain1_sex1: {variables_key: variables_values}, strain1_sex2: {variables_key: variables_values}}}
+            if not current_sp_genus_species in all_sp_workflow_dict.keys():
+                all_sp_workflow_dict[current_sp_genus_species] = current_sp_genus_species_dict
+            else:
+                all_sp_workflow_dict[current_sp_genus_species][current_sp_strain_sex] = current_sp_strain_sex_attributes_dict
+
+        if len(list(strains.keys())) == 1:
+            logging.info("Input organism %s: 1 species detected in input dictionary" % species)
+
+            # Set workflow path (1 organism)
+            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_1org_v1.ga")
+
+            # Instance object required variables
+            instance_url, email, password = None, None, None
+
+            # Set the galaxy instance variables
+            for k2, v2 in strains.items():
+                instance_url = v2["instance_url"]
+                email = v2["email"]
+                password = v2["password"]
+
+            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
+
+            # Check if the versions of tools specified in the workflow are installed in galaxy
+            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
+
+            organisms_key_name = list(strains.keys())
+            org_dict = strains[organisms_key_name[0]]
+
+            history_id = org_dict["history_id"]
+
+            # Organism attributes
+            org_genus = org_dict["genus"]
+            org_species = org_dict["species"]
+            org_genus_species = org_dict["genus_species"]
+            org_species_folder_name = org_dict["species_folder_name"]
+            org_full_name = org_dict["full_name"]
+            org_strain = org_dict["sex"]
+            org_sex = org_dict["strain"]
+            org_org_id = org_dict["org_id"]
+            org_blastp_analysis_id = org_dict["blastp_analysis_id"]
+            org_blastp_hda_id = org_dict["hda_ids"]["blastp_hda_id"]
+
+            # Store these values into a dict for parameters logging/validation
+            org_parameters_dict = {
+                "org_genus": org_genus,
+                "org_species": org_species,
+                "org_genus_species": org_genus_species,
+                "org_species_folder_name": org_species_folder_name,
+                "org_full_name": org_full_name,
+                "org_strain": org_strain,
+                "org_sex": org_sex,
+                "org_org_id": org_org_id,
+                "org_blast_analysis_id": org_blastp_analysis_id,
+                "org_blastp_hda_id": org_blastp_hda_id,
+            }
+
+            # Look for empty parameters values, throw a critical error if a parameter value is invalid
+            for param_name, param_value in org_parameters_dict.items():
+                if param_value is None or param_value == "":
+                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org_full_name, param_name, param_value))
+                    sys.exit()
+
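+            # Workflow step IDs, ordered as in the .ga workflow file (check the .ga file if a step fails to match)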
+            BLASTP_FILE = "0"
+            LOAD_BLASTP_FILE = "1"
+            WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS = "2"
+            WF_LOAD_GFF_JB_2_ORG_STEP_INDEX = "3"
+
+            # Set the workflow parameters (individual tools runtime parameters in the workflow)
+            workflow_parameters = {}
+            workflow_parameters[BLASTP_FILE] = {}
+            workflow_parameters[LOAD_BLASTP_FILE] = {"analysis_id": org_blastp_analysis_id, "organism_id": org_org_id}
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS] = {}
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_INDEX] = {}
+
+            datamap = {}
+            datamap[BLASTP_FILE] = {"src": "hda", "id": org_blastp_hda_id}
+
+            with open(workflow_path, 'r') as ga_in_file:
+                # Store the decoded json dictionary
+                workflow_dict = json.load(ga_in_file)
+                workflow_name = workflow_dict["name"]
+
+                # Import the workflow in galaxy as a dict
+                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+                # Get its attributes
+                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
+                # Then get its ID (required to invoke the workflow)
+                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                logging.debug("Workflow ID: %s" % workflow_id)
+                # Check if the workflow is found
+                try:
+                    show_workflow = instance.workflows.show_workflow(workflow_id=workflow_id)
+                except bioblend.ConnectionError:
+                    logging.warning("Error finding workflow %s" % workflow_name)
+
+                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
+
+                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
+
+
+
+        if len(list(strains.keys())) == 2:
+
+            logging.info("Input organism %s: 2 species detected in input dictionary" % species)
+
+            # Set workflow path (2 organisms)
+            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_2org_v1.ga")
+
+            # Instance object required variables
+            instance_url, email, password = None, None, None
+
+            # Set the galaxy instance variables
+            for k2, v2 in strains.items():
+                instance_url = v2["instance_url"]
+                email = v2["email"]
+                password = v2["password"]
+
+            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
+
+            # Check if the versions of tools specified in the workflow are installed in galaxy
+            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
+
+            organisms_key_names = list(strains.keys())
+            org1_dict = strains[organisms_key_names[0]]
+            org2_dict = strains[organisms_key_names[1]]
+
+            history_id = org1_dict["history_id"]
+
+            # Organism 1 attributes
+            org1_genus = org1_dict["genus"]
+            org1_species = org1_dict["species"]
+            org1_genus_species = org1_dict["genus_species"]
+            org1_species_folder_name = org1_dict["species_folder_name"]
+            org1_full_name = org1_dict["full_name"]
+            org1_strain = org1_dict["sex"]
+            org1_sex = org1_dict["strain"]
+            org1_org_id = org1_dict["org_id"]
+            org1_blastp_analysis_id = org1_dict["blastp_analysis_id"]
+            org1_blastp_hda_id = org1_dict["hda_ids"]["blastp_hda_id"]
+
+            # Store these values into a dict for parameters logging/validation
+            org1_parameters_dict = {
+                "org1_genus": org1_genus,
+                "org1_species": org1_species,
+                "org1_genus_species": org1_genus_species,
+                "org1_species_folder_name": org1_species_folder_name,
+                "org1_full_name": org1_full_name,
+                "org1_strain": org1_strain,
+                "org1_sex": org1_sex,
+                "org1_org_id": org1_org_id,
+                "org1_blast_analysis_id": org1_blastp_analysis_id,
+                "org1_blastp_hda_id": org1_blastp_hda_id,
+            }
+
+
+            # Look for empty parameters values, throw a critical error if a parameter value is invalid
+            for param_name, param_value in org1_parameters_dict.items():
+                if param_value is None or param_value == "":
+                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org1_full_name, param_name, param_value))
+                    sys.exit()
+
+            # Organism 2 attributes
+            org2_genus = org2_dict["genus"]
+            org2_species = org2_dict["species"]
+            org2_genus_species = org2_dict["genus_species"]
+            org2_species_folder_name = org2_dict["species_folder_name"]
+            org2_full_name = org2_dict["full_name"]
+            org2_strain = org2_dict["sex"]
+            org2_sex = org2_dict["strain"]
+            org2_org_id = org2_dict["org_id"]
+            org2_blastp_analysis_id = org2_dict["blastp_analysis_id"]
+            org2_blastp_hda_id = org2_dict["hda_ids"]["blastp_hda_id"]
+
+            # Store these values into a dict for parameters logging/validation
+            org2_parameters_dict = {
+                "org2_genus": org2_genus,
+                "org2_species": org2_species,
+                "org2_genus_species": org2_genus_species,
+                "org2_species_folder_name": org_species_folder_name,
+                "org2_full_name": org2_full_name,
+                "org2_strain": org2_strain,
+                "org2_sex": org2_sex,
+                "org2_org_id": org2_org_id,
+                "org2_blast_analysis_id": org2_blastp_analysis_id,
+                "org2_blastp_hda_id": org2_blastp_hda_id,
+            }
+
+
+            # Look for empty parameters values, throw a critical error if a parameter value is invalid
+            for param_name, param_value in org2_parameters_dict.items():
+                if param_value is None or param_value == "":
+                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org2_full_name, param_name, param_value))
+                    sys.exit()
+
+            # Source files association (ordered by their IDs in the workflow)
+            # WARNING: Be very careful about how the workflow is "organized" (i.e the order of the steps/datasets, check the .ga if there is any error)
+            BLASTP_FILE_ORG1 = "0"
+            BLASTP_FILE_ORG2 = "1"
+            LOAD_BLASTP_FILE_ORG1 = "2"
+            LOAD_BLASTP_FILE_ORG2 = "3"
+            WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS = "4"
+            WF_LOAD_GFF_JB_2_ORG_STEP_INDEX = "5"
+
+            # Set the workflow parameters (individual tools runtime parameters in the workflow)
+            workflow_parameters = {}
+
+            # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
+            workflow_parameters[BLASTP_FILE_ORG1] = {}
+            workflow_parameters[BLASTP_FILE_ORG2] = {}
+
+            # Organism 1
+            workflow_parameters[LOAD_BLASTP_FILE_ORG1] = {"organism_id": org1_org_id,
+                                                          "analysis_id": org1_blastp_analysis_id}
+
+            # Organism 2
+            workflow_parameters[LOAD_BLASTP_FILE_ORG2] = {"organism_id": org2_org_id,
+                                                          "analysis_id": org2_blastp_analysis_id}
+
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS] = {}
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_INDEX] = {}
+
+            # Set datamap (mapping of input files in the workflow)
+            datamap = {}
+
+            # Organism 1
+            datamap[BLASTP_FILE_ORG1] = {"src": "hda", "id": org1_blastp_hda_id}
+
+            # Organism 2
+            datamap[BLASTP_FILE_ORG2] = {"src": "hda", "id": org2_blastp_hda_id}
+
+            with open(workflow_path, 'r') as ga_in_file:
+                # Store the decoded json dictionary
+                workflow_dict = json.load(ga_in_file)
+                workflow_name = workflow_dict["name"]
+
+                # Import the workflow in galaxy as a dict
+                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+                # Get its attributes
+                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
+                # Then get its ID (required to invoke the workflow)
+                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                logging.debug("Workflow ID: %s" % workflow_id)
+                # Check if the workflow is found
+                try:
+                    show_workflow = instance.workflows.show_workflow(workflow_id=workflow_id)
+                except bioblend.ConnectionError:
+                    logging.warning("Error finding workflow %s" % workflow_name)
+
+                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
+
+                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
+
+
+    if workflow_type == "interpro":
+        for sp_dict in sp_dict_list:
+
+            # Add and retrieve all analyses/organisms for the current input species and add their IDs to the input dictionary
+            sp_wf_param = get_sp_workflow_param(sp_dict, main_dir=main_dir, config=config, workflow_type="interpro")
+
+            current_sp_genus_species = list(sp_wf_param.keys())[0]
+            current_sp_genus_species_dict = list(sp_wf_param.values())[0]
+            current_sp_strain_sex = list(current_sp_genus_species_dict.keys())[0]
+            current_sp_strain_sex_attributes_dict = list(current_sp_genus_species_dict.values())[0]
+
+            # Add the species dictionary to the complete dictionary
+            # This dictionary contains every organism present in the input file
+            # Its structure is the following:
+            # {genus species: {strain1_sex1: {variables_key: variables_values}, strain1_sex2: {variables_key: variables_values}}}
+            if not current_sp_genus_species in all_sp_workflow_dict.keys():
+                all_sp_workflow_dict[current_sp_genus_species] = current_sp_genus_species_dict
+            else:
+                all_sp_workflow_dict[current_sp_genus_species][current_sp_strain_sex] = current_sp_strain_sex_attributes_dict
+
+        if len(list(strains.keys())) == 1:
+            logging.info("Input organism %s: 1 species detected in input dictionary" % species)
+
+            # Set workflow path (1 organism)
+            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_1org_v1.ga")
+
+            # Instance object required variables
+            instance_url, email, password = None, None, None
+
+            # Set the galaxy instance variables
+            for k2, v2 in strains.items():
+                instance_url = v2["instance_url"]
+                email = v2["email"]
+                password = v2["password"]
+
+            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
+
+            # Check if the versions of tools specified in the workflow are installed in galaxy
+            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
+
+            organisms_key_name = list(strains.keys())
+            org_dict = strains[organisms_key_name[0]]
+
+            history_id = org_dict["history_id"]
+
+            # Organism attributes
+            org_genus = org_dict["genus"]
+            org_species = org_dict["species"]
+            org_genus_species = org_dict["genus_species"]
+            org_species_folder_name = org_dict["species_folder_name"]
+            org_full_name = org_dict["full_name"]
+            org_strain = org_dict["sex"]
+            org_sex = org_dict["strain"]
+            org_org_id = org_dict["org_id"]
+            org_interproscan_analysis_id = org_dict["interproscan_analysis_id"]
+            org_interproscan_hda_id = org_dict["hda_ids"]["interproscan_hda_id"]
+
+            # Store these values into a dict for parameters logging/validation
+            org_parameters_dict = {
+                "org_genus": org_genus,
+                "org_species": org_species,
+                "org_genus_species": org_genus_species,
+                "org_species_folder_name": org_species_folder_name,
+                "org_full_name": org_full_name,
+                "org_strain": org_strain,
+                "org_sex": org_sex,
+                "org_org_id": org_org_id,
+                "org_inteproscan_analysis_id": org_inteproscan_analysis_id,
+                "org_interproscan_hda_id": org_interproscan_hda_id,
+            }
+
+            # Look for empty parameters values, throw a critical error if a parameter value is invalid
+            for param_name, param_value in org_parameters_dict.items():
+                if param_value is None or param_value == "":
+                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org_full_name, param_name, param_value))
+                    sys.exit()
+
+            INTEPRO_FILE = "0"
+            LOAD_INTERPRO_FILE = "1"
+            WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS = "2"
+            WF_LOAD_GFF_JB_2_ORG_STEP_INDEX = "3"
+
+            # Set the workflow parameters (individual tools runtime parameters in the workflow)
+            workflow_parameters = {}
+            workflow_parameters[INTEPRO_FILE] = {}
+            workflow_parameters[LOAD_INTERPRO_FILE] = {"analysis_id": org_inteproscan_analysis_id, "organism_id": org_org_id}
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS] = {}
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_INDEX] = {}
+
+            datamap = {}
+            datamap[INTEPRO_FILE] = {"src": "hda", "id": org_interproscan_hda_id}
+
+            with open(workflow_path, 'r') as ga_in_file:
+                # Store the decoded json dictionary
+                workflow_dict = json.load(ga_in_file)
+                workflow_name = workflow_dict["name"]
+
+                # Import the workflow in galaxy as a dict
+                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+                # Get its attributes
+                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
+                # Then get its ID (required to invoke the workflow)
+                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                logging.debug("Workflow ID: %s" % workflow_id)
+                # Check if the workflow is found
+                try:
+                    show_workflow = instance.workflows.show_workflow(workflow_id=workflow_id)
+                except bioblend.ConnectionError:
+                    logging.warning("Error finding workflow %s" % workflow_name)
+
+                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
+
+                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
+
+
+
+        if len(list(strains.keys())) == 2:
+
+            logging.info("Input organism %s: 2 species detected in input dictionary" % species)
+
+            # Set workflow path (2 organisms)
+            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_2org_v1.ga")
+
+            # Instance object required variables
+            instance_url, email, password = None, None, None
+
+            # Set the galaxy instance variables
+            for k2, v2 in strains.items():
+                instance_url = v2["instance_url"]
+                email = v2["email"]
+                password = v2["password"]
+
+            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
+
+            # Check if the versions of tools specified in the workflow are installed in galaxy
+            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
+
+            organisms_key_names = list(strains.keys())
+            org1_dict = strains[organisms_key_names[0]]
+            org2_dict = strains[organisms_key_names[1]]
+
+            history_id = org1_dict["history_id"]
+
+            # Organism 1 attributes
+            org1_genus = org1_dict["genus"]
+            org1_species = org1_dict["species"]
+            org1_genus_species = org1_dict["genus_species"]
+            org1_species_folder_name = org1_dict["species_folder_name"]
+            org1_full_name = org1_dict["full_name"]
+            org1_strain = org1_dict["sex"]
+            org1_sex = org1_dict["strain"]
+            org1_org_id = org1_dict["org_id"]
+            org1_interproscan_analysis_id = org1_dict["interproscan_analysis_id"]
+            org1_interproscan_hda_id = org1_dict["hda_ids"]["interproscan_hda_id"]
+
+            # Store these values into a dict for parameters logging/validation
+            org1_parameters_dict = {
+                "org1_genus": org1_genus,
+                "org1_species": org1_species,
+                "org1_genus_species": org1_genus_species,
+                "org1_species_folder_name": org1_species_folder_name,
+                "org1_full_name": org1_full_name,
+                "org1_strain": org1_strain,
+                "org1_sex": org1_sex,
+                "org1_org_id": org1_org_id,
+                "org1_interproscan_analysis_id": org1_interproscan_analysis_id,
+                "org1_interproscan_hda_id": org1_interproscan_hda_id,
+            }
+
+
+            # Look for empty parameters values, throw a critical error if a parameter value is invalid
+            for param_name, param_value in org1_parameters_dict.items():
+                if param_value is None or param_value == "":
+                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org1_full_name, param_name, param_value))
+                    sys.exit()
+
+            # Organism 2 attributes
+            org2_genus = org2_dict["genus"]
+            org2_species = org2_dict["species"]
+            org2_genus_species = org2_dict["genus_species"]
+            org2_species_folder_name = org2_dict["species_folder_name"]
+            org2_full_name = org2_dict["full_name"]
+            org2_strain = org2_dict["sex"]
+            org2_sex = org2_dict["strain"]
+            org2_org_id = org2_dict["org_id"]
+            org2_interproscan_analysis_id = org2_dict["interproscan_analysis_id"]
+            org2_interproscan_hda_id = org2_dict["hda_ids"]["interproscan_hda_id"]
+
+            # Store these values into a dict for parameters logging/validation
+            org2_parameters_dict = {
+                "org2_genus": org2_genus,
+                "org2_species": org2_species,
+                "org2_genus_species": org2_genus_species,
+                "org2_species_folder_name": org_species_folder_name,
+                "org2_full_name": org2_full_name,
+                "org2_strain": org2_strain,
+                "org2_sex": org2_sex,
+                "org2_org_id": org2_org_id,
+                "org2_interproscan_analysis_id": org2_interproscan_analysis_id,
+                "org2_interproscan_hda_id": org2_interproscan_hda_id,
+            }
+
+
+            # Look for empty parameters values, throw a critical error if a parameter value is invalid
+            for param_name, param_value in org2_parameters_dict.items():
+                if param_value is None or param_value == "":
+                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org2_full_name, param_name, param_value))
+                    sys.exit()
+
+            # Source files association (ordered by their IDs in the workflow)
+            # WARNING: Be very careful about how the workflow is "organized" (i.e the order of the steps/datasets, check the .ga if there is any error)
+            INTERPRO_FILE_ORG1 = "0"
+            INTERPRO_FILE_ORG2 = "1"
+            LOAD_INTERPRO_FILE_ORG1 = "2"
+            LOAD_INTERPRO_FILE_ORG2 = "3"
+            WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS = "4"
+            WF_LOAD_GFF_JB_2_ORG_STEP_INDEX = "5"
+
+            # Set the workflow parameters (individual tools runtime parameters in the workflow)
+            workflow_parameters = {}
+
+            # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
+            workflow_parameters[INTERPRO_FILE_ORG1] = {}
+            workflow_parameters[INTERPRO_FILE_ORG2] = {}
+
+            # Organism 1
+            workflow_parameters[LOAD_INTERPRO_FILE_ORG1] = {"organism_id": org1_org_id,
+                                                          "analysis_id": org1_interproscan_analysis_id}
+
+            # Organism 2
+            workflow_parameters[LOAD_INTERPRO_FILE_ORG2] = {"organism_id": org2_org_id,
+                                                          "analysis_id": org2_interproscan_analysis_id}
+
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS] = {}
+            workflow_parameters[WF_LOAD_GFF_JB_2_ORG_STEP_INDEX] = {}
+
+            # Set datamap (mapping of input files in the workflow)
+            datamap = {}
+
+            # Organism 1
+            datamap[INTERPRO_FILE_ORG1] = {"src": "hda", "id": org1_interproscan_hda_id}
+
+            # Organism 2
+            datamap[INTERPRO_FILE_ORG2] = {"src": "hda", "id": org2_interproscan_hda_id}
+
+            with open(workflow_path, 'r') as ga_in_file:
+                # Store the decoded json dictionary
+                workflow_dict = json.load(ga_in_file)
+                workflow_name = workflow_dict["name"]
+
+                # Import the workflow in galaxy as a dict
+                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+                # Get its attributes
+                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
+                # Then get its ID (required to invoke the workflow)
+                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                logging.debug("Workflow ID: %s" % workflow_id)
+                # Check if the workflow is found
+                try:
+                    show_workflow = instance.workflows.show_workflow(workflow_id=workflow_id)
+                except bioblend.ConnectionError:
+                    logging.warning("Error finding workflow %s" % workflow_name)
+
+                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
+
+                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
diff --git a/gga_run_workflow_phaeo_jbrowse.py b/gga_run_workflow_phaeo_jbrowse.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0f42fb99f5feb49c37bd706adf9e4b4d05bf18c
--- /dev/null
+++ b/gga_run_workflow_phaeo_jbrowse.py
@@ -0,0 +1,625 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import bioblend.galaxy.objects
+import argparse
+import os
+import logging
+import sys
+import json
+from bioblend.galaxy.objects import GalaxyInstance
+
+import utilities
+import utilities_bioblend
+import constants
+import constants_phaeo
+import runWorkflowPhaeo
+
+class OrgWorkflowParamJbrowse(runWorkflowPhaeo.OrgWorkflowParam):
+
+    def __init__(self, genus_uppercase, chado_species_name, full_name, species_folder_name,
+                 org_id, history_id, instance, genome_analysis_id=None, ogs_analysis_id=None,
+                 genome_hda_id=None, gff_hda_id=None, transcripts_hda_id=None, proteins_hda_id=None):
+        self.genome_analysis_id = genome_analysis_id
+        self.ogs_analysis_id = ogs_analysis_id
+        self.genome_hda_id = genome_hda_id
+        self.gff_hda_id = gff_hda_id
+        self.transcripts_hda_id = transcripts_hda_id
+        self.proteins_hda_id = proteins_hda_id
+        super().__init__(genus_uppercase, chado_species_name, full_name, species_folder_name,
+                 org_id, history_id, instance)
+
+    def check_param(self):
+        params = [self.genus_uppercase,
+                  self.chado_species_name,
+                  self.full_name,
+                  self.species_folder_name,
+                  self.org_id,
+                  self.history_id,
+                  self.instance,
+                  self.genome_analysis_id,
+                  self.ogs_analysis_id,
+                  self.genome_hda_id,
+                  self.gff_hda_id,
+                  self.transcripts_hda_id,
+                  self.proteins_hda_id]
+        utilities_bioblend.check_wf_param(self.full_name, params)
+
+class RunWorkflowJbrowse(runWorkflowPhaeo.RunWorkflow):
+    """
+    Run a workflow in the galaxy instance's history of a given species
+
+    This class is designed for a Phaeoexplorer-specific workflow, but can be adapted to run any workflow,
+    provided the user creates their own workflow in .ga format and adapts the workflow parameters set in this
+    script accordingly
+
+    """
+
+    def __init__(self, parameters_dictionary):
+
+        super().__init__(parameters_dictionary)
+
+        self.chado_species_name = " ".join(utilities.filter_empty_not_empty_items(
+            [self.species, self.strain, self.sex])["not_empty"])
+
+        self.abbreviation = self.genus_uppercase[0] + ". " + self.chado_species_name
+
+        self.common = self.genus_uppercase + " " + self.chado_species_name
+        if self.common_name is not None and self.common_name != "":
+            # The common_name alone is not sufficient, as it may not be unique between the different strains
+            # of the species, in which case galaxy throws an error
+            self.common = self.common + " (" + self.common_name + ")"
+
+        self.genome_analysis_name = "genome v{0} of {1}".format(self.genome_version, self.full_name)
+        self.genome_analysis_programversion = "genome v{0}".format(self.genome_version)
+        self.genome_analysis_sourcename = self.full_name
+
+        self.ogs_analysis_name = "OGS{0} of {1}".format(self.ogs_version, self.full_name)
+        self.ogs_analysis_programversion = "OGS{0}".format(self.ogs_version)
+        self.ogs_analysis_sourcename = self.full_name
+
+        self.genome_hda_id = None
+        self.gff_hda_id = None
+        self.transcripts_hda_id = None
+        self.proteins_hda_id = None
+
+    def install_individual_tools(self):
+        """
+        Verify that the tools called outside workflows are installed with the correct versions and changesets.
+        If the versions don't match, install the correct version + changeset in the instance.
+        Does nothing if the versions match.
+
+        :return:
+        """
+
+        logging.info("Validating installed individual tools versions and changesets")
+
+        # Verify that the add_organism and add_analysis versions are correct in the instance
+        # The changeset for 2.3.4+galaxy0 has to be set manually, because there is no way to get the wanted changeset
+        # of a non-installed tool via bioblend, except for workflows (.ga), which already contain the changeset revisions inside the step ids
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.GET_ORGANISMS_TOOL_ID,
+                                                       version=constants_phaeo.GET_ORGANISMS_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.GET_ORGANISMS_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.GET_ANALYSES_TOOL_ID,
+                                                       version=constants_phaeo.GET_ANALYSES_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.GET_ANALYSES_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ADD_ORGANISM_TOOL_ID,
+                                                       version=constants_phaeo.ADD_ORGANISM_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ADD_ORGANISM_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ADD_ANALYSIS_TOOL_ID,
+                                                       version=constants_phaeo.ADD_ANALYSIS_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ADD_ANALYSIS_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ANALYSIS_SYNC_TOOL_ID,
+                                                       version=constants_phaeo.ANALYSIS_SYNC_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ANALYSIS_SYNC_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        utilities_bioblend.install_repository_revision(tool_id=constants_phaeo.ORGANISM_SYNC_TOOL_ID,
+                                                       version=constants_phaeo.ORGANISM_SYNC_TOOL_VERSION,
+                                                       changeset_revision=constants_phaeo.ORGANISM_SYNC_TOOL_CHANGESET_REVISION,
+                                                       instance=self.instance)
+
+        logging.info("Success: individual tools versions and changesets validated")
+
+    def add_organism_and_sync(self):
+
+        get_organisms_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+            instance=self.instance,
+            tool_id=constants_phaeo.GET_ORGANISMS_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={},
+            time_sleep=10
+        )
+        organisms_dict_list = json.loads(get_organisms_tool_dataset)  # Turn the dataset into a list for parsing
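+        # The decoded dataset is expected to be a list of organism records, e.g. (illustrative values only):
+        # [{"organism_id": 1, "genus": "Undaria", "species": "pinnatifida male", ...}]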
+
+        org_id = None
+
+        # Look up list of outputs (dictionaries)
+        for org_dict in organisms_dict_list:
+            if org_dict["genus"] == self.genus_uppercase and org_dict["species"] == self.chado_species_name:
+                org_id = str(org_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
+
+        if org_id is None:
+            add_organism_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+                instance=self.instance,
+                tool_id=constants_phaeo.ADD_ORGANISM_TOOL_ID,
+                history_id=self.history_id,
+                tool_inputs={"abbr": self.abbreviation,
+                             "genus": self.genus_uppercase,
+                             "species": self.chado_species_name,
+                             "common": self.common},
+                time_sleep=10
+            )
+            organism_dict = json.loads(add_organism_tool_dataset)
+            org_id = str(organism_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
+
+        # Synchronize newly added organism in Tripal
+        logging.info("Synchronizing organism %s in Tripal" % self.full_name)
+        utilities_bioblend.run_tool(
+            instance=self.instance,
+            tool_id=constants_phaeo.ORGANISM_SYNC_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={"organism_id": org_id})
+
+        return org_id
+
+    def import_datasets_into_history(self, config):
+        """
+        Find datasets in a library, get their ID and import them into the current history if they are not already
+        """
+
+        genome_ldda_id = None
+        transcripts_ldda_id = None
+        proteins_ldda_id = None
+        gff_ldda_id = None
+
+        genome_hda_id = None
+        gff_hda_id = None
+        transcripts_hda_id = None
+        proteins_hda_id = None
+
+        gio = GalaxyInstance(url=self.instance_url,
+                             email=config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
+                             password=config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD]
+                             )
+        prj_lib = gio.libraries.get_previews(constants.GALAXY_LIBRARY_NAME)
+        if len(prj_lib) == 1:
+            library_id = prj_lib[0].id
+        else:
+            logging.critical("Multiple (or no one) libraries '%s' exist" % constants.GALAXY_LIBRARY_NAME)
+            sys.exit()
+        folder_dict_list = self.instance.libraries.get_folders(library_id=str(library_id))
+
+        folders_id_dict = {}
+
+        # Loop over the folders in the library and map folders names to their IDs
+        for folder_dict in folder_dict_list:
+            folders_id_dict[folder_dict["name"]] = folder_dict["id"]
+
+        # Iterating over the folders to find datasets and map datasets to their IDs
+        for folder_name, folder_id in folders_id_dict.items():
+            if folder_name == "/genome/{0}/v{1}".format(self.species_folder_name, self.genome_version):
+                sub_folder_content = self.instance.folders.show_folder(folder_id=folder_id, contents=True)
+                for value in sub_folder_content.values():
+                    for e in value:
+                        if type(e) == dict:
+                            if e["name"].endswith(self.genome_filename):
+                                genome_ldda_id = e["ldda_id"]
+
+            if folder_name == "/annotation/{0}/OGS{1}".format(self.species_folder_name, self.ogs_version):
+                sub_folder_content = self.instance.folders.show_folder(folder_id=folder_id, contents=True)
+                for value in sub_folder_content.values():
+                    for e in value:
+                        if type(e) == dict:
+                            ldda_name = e["name"]
+                            ldda_id = e["ldda_id"]
+                            if ldda_name.endswith(self.transcripts_filename):
+                                transcripts_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.proteins_filename):
+                                proteins_ldda_id = ldda_id
+                            elif ldda_name.endswith(self.gff_filename):
+                                gff_ldda_id = ldda_id
+
+        hda_list = self.instance.datasets.get_datasets()
+        # Finding datasets in history (matching datasets names)
+        for hda in hda_list:
+            hda_name = hda["name"]
+            hda_id = hda["id"]
+            if hda_name == self.genome_filename:
+                genome_hda_id = hda_id
+            if hda_name == self.gff_filename:
+                gff_hda_id = hda_id
+            if hda_name == self.transcripts_filename:
+                transcripts_hda_id = hda_id
+            if hda_name == self.proteins_filename:
+                proteins_hda_id = hda_id
+
+        # Import each dataset into history if it is not imported
+        logging.debug("Uploading datasets into history %s" % self.history_id)
+
+        if genome_hda_id is None:
+            genome_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=genome_ldda_id)
+            genome_hda_id = genome_dataset_upload["id"]
+        if gff_hda_id is None:
+            gff_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=gff_ldda_id)
+            gff_hda_id = gff_dataset_upload["id"]
+        if proteins_hda_id is None:
+            proteins_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=proteins_ldda_id)
+            proteins_hda_id = proteins_dataset_upload["id"]
+        if transcripts_hda_id is None:
+            transcripts_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=transcripts_ldda_id)
+            transcripts_hda_id = transcripts_dataset_upload["id"]
+
+        self.genome_hda_id = genome_hda_id
+        self.gff_hda_id = gff_hda_id
+        self.transcripts_hda_id = transcripts_hda_id
+        self.proteins_hda_id = proteins_hda_id
+
+def prepare_history_and_get_wf_param(sp_dict_list, main_dir, config):
+
+    all_org_wf_param_dict = {}
+    for sp_dict in sp_dict_list:
+
+        run_workflow_for_current_organism = RunWorkflowJbrowse(parameters_dictionary=sp_dict)
+
+        # Verifying the galaxy container is running
+        if not utilities_bioblend.check_galaxy_state(network_name=run_workflow_for_current_organism.genus_species,
+                                                     script_dir=run_workflow_for_current_organism.script_dir):
+            logging.critical(
+                "The galaxy container for %s is not ready yet!" % run_workflow_for_current_organism.genus_species)
+            sys.exit()
+
+        else:
+
+            # Setting some of the instance attributes
+            run_workflow_for_current_organism.main_dir = main_dir
+
+            run_workflow_for_current_organism.set_galaxy_instance(config)
+            run_workflow_for_current_organism.set_history()
+            run_workflow_for_current_organism.install_individual_tools()
+            run_workflow_for_current_organism.import_datasets_into_history(config)
+
+            analyses_dict_list = run_workflow_for_current_organism.get_analyses()
+
+            org_id = run_workflow_for_current_organism.add_organism_and_sync()
+            genome_analysis_id = run_workflow_for_current_organism.add_analysis_and_sync(
+                analyses_dict_list=analyses_dict_list,
+                analysis_name=run_workflow_for_current_organism.genome_analysis_name,
+                analysis_programversion=run_workflow_for_current_organism.genome_analysis_programversion,
+                analysis_sourcename=run_workflow_for_current_organism.genome_analysis_sourcename
+            )
+            ogs_analysis_id = run_workflow_for_current_organism.add_analysis_and_sync(
+                analyses_dict_list=analyses_dict_list,
+                analysis_name=run_workflow_for_current_organism.ogs_analysis_name,
+                analysis_programversion=run_workflow_for_current_organism.ogs_analysis_programversion,
+                analysis_sourcename=run_workflow_for_current_organism.ogs_analysis_sourcename
+            )
+
+            # Create the OrgWorkflowParamJbrowse object holding all attributes needed for the workflow
+            org_wf_param = OrgWorkflowParamJbrowse(
+                genus_uppercase=run_workflow_for_current_organism.genus_uppercase,
+                full_name=run_workflow_for_current_organism.full_name,
+                species_folder_name=run_workflow_for_current_organism.species_folder_name,
+                chado_species_name=run_workflow_for_current_organism.chado_species_name,
+                org_id=org_id,
+                genome_analysis_id=genome_analysis_id,
+                ogs_analysis_id=ogs_analysis_id,
+                genome_hda_id=run_workflow_for_current_organism.genome_hda_id,
+                gff_hda_id=run_workflow_for_current_organism.gff_hda_id,
+                transcripts_hda_id=run_workflow_for_current_organism.transcripts_hda_id,
+                proteins_hda_id=run_workflow_for_current_organism.proteins_hda_id,
+                history_id=run_workflow_for_current_organism.history_id,
+                instance=run_workflow_for_current_organism.instance
+            )
+            org_wf_param.check_param()
+
+            # Add the species dictionary to the complete dictionary
+            # This dictionary contains every organism present in the input file
+            # Its structure is the following:
+            # {genus_species: {strain1_sex1: OrgWorkflowParamJbrowse object, strain1_sex2: OrgWorkflowParamJbrowse object}}
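+            # e.g. (hypothetical input): {"undaria_pinnatifida": {"male": <OrgWorkflowParamJbrowse>, "female": <OrgWorkflowParamJbrowse>}}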
+            if not run_workflow_for_current_organism.genus_species in all_org_wf_param_dict.keys():
+                all_org_wf_param_dict[run_workflow_for_current_organism.genus_species] = {
+                    run_workflow_for_current_organism.strain_sex: org_wf_param}
+            else:
+                if not run_workflow_for_current_organism.strain_sex in all_org_wf_param_dict[
+                    run_workflow_for_current_organism.genus_species].keys():
+                    all_org_wf_param_dict[run_workflow_for_current_organism.genus_species][
+                        run_workflow_for_current_organism.strain_sex] = org_wf_param
+                else:
+                    logging.error("Duplicate organism with 'genus_species' = '{0}' and 'strain_sex' = '{1}'".format(
+                        run_workflow_for_current_organism.genus_species, run_workflow_for_current_organism.strain_sex))
+
+    return all_org_wf_param_dict
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run Galaxy workflows, specific to Phaeoexplorer data")
+
+    parser.add_argument("input",
+                        type=str,
+                        help="Input file (yml)")
+
+    parser.add_argument("-v", "--verbose",
+                        help="Increase output verbosity",
+                        action="store_true")
+
+    parser.add_argument("-vv", "--very_verbose",
+                        help="Increase output verbosity",
+                        action="store_true")
+
+    parser.add_argument("--config",
+                        type=str,
+                        help="Config path, default to the 'config' file inside the script repository")
+
+    parser.add_argument("--main-directory",
+                        type=str,
+                        help="Where the stack containers will be located, defaults to working directory")
+
+    args = parser.parse_args()
+
+    if args.verbose or args.very_verbose:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.INFO)
+
+    if not args.very_verbose:
+        logging.getLogger("urllib3").setLevel(logging.INFO)
+        logging.getLogger("bioblend").setLevel(logging.INFO)
+
+    # Parsing the config file if provided, using the default config otherwise
+    if args.config:
+        config_file = os.path.abspath(args.config)
+    else:
+        config_file = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), constants.DEFAULT_CONFIG)
+
+    main_dir = None
+    if not args.main_directory:
+        main_dir = os.getcwd()
+    else:
+        main_dir = os.path.abspath(args.main_directory)
+
+    config = utilities.parse_config(config_file)
+    sp_dict_list = utilities.parse_input(args.input)
+    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+
+    all_org_wf_param_dict = prepare_history_and_get_wf_param(
+        sp_dict_list=sp_dict_list,
+        main_dir=main_dir,
+        config=config)
+
+    for genus_species, strains in all_org_wf_param_dict.items():
+        strains_list = list(strains.keys())
+        strains_count = len(strains_list)
+
+        if strains_count == 1:
+            logging.info("Input species %s: 1 strain detected in input dictionary" % genus_species)
+            strain_sex = list(strains.keys())[0]
+            org_wf_param = strains[strain_sex]
+
+            # Set workflow path (1 organism)
+            workflow_path = os.path.join(os.path.abspath(script_dir), constants_phaeo.WORKFLOWS_PATH, constants_phaeo.WF_LOAD_GFF_JB_1_ORG_FILE)
+
+            # Check if the versions of tools specified in the workflow are installed in galaxy
+            utilities_bioblend.install_workflow_tools(workflow_path=workflow_path, instance=org_wf_param.instance)
+
+            # Set the workflow parameters (individual tools runtime parameters in the workflow)
+            workflow_parameters = {}
+            # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GENOME] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GFF] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_PROTEINS] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_LOAD_FASTA] = {
+                "organism": org_wf_param.org_id,
+                "analysis_id": org_wf_param.genome_analysis_id,
+                "do_update": "true"}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_LOAD_GFF] = {
+                "organism": org_wf_param.org_id,
+                "analysis_id": org_wf_param.ogs_analysis_id}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_FEATURE_SYNC] = {
+                "organism_id": org_wf_param.org_id}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_POPULATE_VIEWS] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_INDEX] = {}
+
+            # Set datamap (mapping of input files in the workflow)
+            datamap = {}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GENOME] = {"src": "hda", "id": org_wf_param.genome_hda_id}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_GFF] = {"src": "hda", "id": org_wf_param.gff_hda_id}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_1_ORG_INPUT_PROTEINS] = {"src": "hda", "id": org_wf_param.proteins_hda_id}
+
+            with open(workflow_path, 'r') as ga_in_file:
+
+                # Store the decoded json dictionary
+                workflow_dict = json.load(ga_in_file)
+                workflow_name = workflow_dict["name"]
+
+                # For the Jbrowse tool, we unfortunately have to manually edit the parameters instead of setting them
+                # as runtime values, because using runtime parameters makes the tool throw an internal critical error ("replace not found")
+                # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
+                if constants.CONF_JBROWSE_MENU_URL not in config.keys():
+                    # default
+                    root_url = "https://{0}".format(config[constants.CONF_ALL_HOSTNAME])
+                else:
+                    root_url = config[constants.CONF_JBROWSE_MENU_URL]
+                # Set "Genus" and "species" as they are given in the add_organism tool (with spaces replaced by "_")
+                species_strain_sex = org_wf_param.chado_species_name.replace(" ", "-")
+                jbrowse_menu_url = "{root_url}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(
+                    root_url=root_url,
+                    genus_sp=genus_species,
+                    Genus=org_wf_param.genus_uppercase,
+                    species=species_strain_sex,
+                    id="{id}")
+                # Replace values in the workflow dictionary
+                workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE]["tool_state"] = \
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JBROWSE]["tool_state"]\
+                    .replace("__MENU_URL_ORG__", jbrowse_menu_url)
+                workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JB_TO_CONTAINER]["tool_state"] = \
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_1_ORG_STEP_JB_TO_CONTAINER]["tool_state"]\
+                    .replace("__DISPLAY_NAME_ORG__", org_wf_param.full_name)\
+                    .replace("__UNIQUE_ID_ORG__", org_wf_param.species_folder_name)
+
+                # Import the workflow in galaxy as a dict
+                org_wf_param.instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+
+                # Get its attributes
+                workflow_dict_list = org_wf_param.instance.workflows.get_workflows(name=workflow_name)
+                # Then get its ID (required to invoke the workflow)
+                workflow_id = workflow_dict_list[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                logging.debug("Workflow ID: %s" % workflow_id)
+                # Check if the workflow is found
+                try:
+                    show_workflow = org_wf_param.instance.workflows.show_workflow(workflow_id=workflow_id)
+                except bioblend.ConnectionError:
+                    logging.warning("Error finding workflow %s" % workflow_name)
+
+                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                org_wf_param.instance.workflows.invoke_workflow(
+                    workflow_id=workflow_id,
+                    history_id=org_wf_param.history_id,
+                    params=workflow_parameters,
+                    inputs=datamap,
+                    allow_tool_state_corrections=True)
+
+                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance for the jobs state".format(workflow_name))
+
+        if strains_count == 2:
+
+            logging.info("Input organism %s: 2 species detected in input dictionary" % genus_species)
+            strain_sex_org1 = strains_list[0]
+            strain_sex_org2 = strains_list[1]
+            sp_wf_param_org1 = strains[strain_sex_org1]
+            sp_wf_param_org2 = strains[strain_sex_org2]
+
+            # Set workflow path (2 organisms)
+            workflow_path = os.path.join(os.path.abspath(script_dir), constants_phaeo.WORKFLOWS_PATH, constants_phaeo.WF_LOAD_GFF_JB_2_ORG_FILE)
+
+            # Check if the versions of tools specified in the workflow are installed in galaxy
+            utilities_bioblend.install_workflow_tools(workflow_path=workflow_path, instance=sp_wf_param_org1.instance)
+
+            # Set the workflow parameters (individual tools runtime parameters in the workflow)
+            workflow_parameters = {}
+            # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG1] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG1] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG1] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG2] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG2] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG2] = {}
+            # Organism 1
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_FASTA_ORG1] = {
+                "organism": sp_wf_param_org1.org_id,
+                "analysis_id": sp_wf_param_org1.genome_analysis_id,
+                "do_update": "true"}
+            # workflow_parameters[JBROWSE_ORG1] = {"jbrowse_menu_url": jbrowse_menu_url_org1}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_GFF_ORG1] = {
+                "organism": sp_wf_param_org1.org_id,
+                "analysis_id": sp_wf_param_org1.ogs_analysis_id}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_FEATURE_SYNC_ORG1] = {
+                "organism_id": sp_wf_param_org1.org_id}
+            # workflow_parameters[JBROWSE_CONTAINER] = {"organisms": [{"name": org1_full_name, "unique_id": org1_species_folder_name, }, {"name": org2_full_name, "unique_id": org2_species_folder_name}]}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER] = {}
+            # Organism 2
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_FASTA_ORG2] = {
+                "organism": sp_wf_param_org2.org_id,
+                "analysis_id": sp_wf_param_org2.genome_analysis_id,
+                "do_update": "true"}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_LOAD_GFF_ORG2] = {
+                "organism": sp_wf_param_org2.org_id,
+                "analysis_id": sp_wf_param_org2.ogs_analysis_id}
+            # workflow_parameters[JRBOWSE_ORG2] = {"jbrowse_menu_url": jbrowse_menu_url_org2}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_FEATURE_SYNC_ORG2] = {
+                "organism_id": sp_wf_param_org2.org_id}
+            # POPULATE + INDEX DATA
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_POPULATE_VIEWS] = {}
+            workflow_parameters[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_INDEX] = {}
+
+            # Set datamap (mapping of input files in the workflow)
+            datamap = {}
+            # Organism 1
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG1] = {"src": "hda", "id": sp_wf_param_org1.genome_hda_id}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG1] = {"src": "hda", "id": sp_wf_param_org1.gff_hda_id}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG1] = {"src": "hda", "id": sp_wf_param_org1.proteins_hda_id}
+            # Organism 2
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GENOME_ORG2] = {"src": "hda", "id": sp_wf_param_org2.genome_hda_id}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_GFF_ORG2] = {"src": "hda", "id": sp_wf_param_org2.gff_hda_id}
+            datamap[constants_phaeo.WF_LOAD_GFF_JB_2_ORG_INPUT_PROTEINS_ORG2] = {"src": "hda", "id": sp_wf_param_org2.proteins_hda_id}
+
+            with open(workflow_path, 'r') as ga_in_file:
+
+                # Store the decoded json dictionary
+                workflow_dict = json.load(ga_in_file)
+                workflow_name = workflow_dict["name"]
+
+                # For the Jbrowse tool, we unfortunately have to manually edit the parameters instead of setting them
+                # as runtime values, because using runtime parameters makes the tool throw an internal critical error ("replace not found")
+                # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
+                if constants.CONF_JBROWSE_MENU_URL not in config.keys():
+                    # default
+                    root_url = "https://{0}".format(config[constants.CONF_ALL_HOSTNAME])
+                else:
+                    root_url = config[constants.CONF_JBROWSE_MENU_URL]
+                # Set "Genus" and "species" as they are given in the add_organism tool (with spaces replaced by "_")
+                species_strain_sex_org1 = sp_wf_param_org1.chado_species_name.replace(" ", "-")
+                species_strain_sex_org2 = sp_wf_param_org2.chado_species_name.replace(" ", "-")
+                jbrowse_menu_url_org1 = "{root_url}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(
+                    root_url=root_url,
+                    genus_sp=genus_species,
+                    Genus=sp_wf_param_org1.genus_uppercase,
+                    species=species_strain_sex_org1,
+                    id="{id}")
+                jbrowse_menu_url_org2 = "{root_url}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(
+                    root_url=root_url,
+                    genus_sp=genus_species,
+                    Genus=sp_wf_param_org2.genus_uppercase,
+                    species=species_strain_sex_org2,
+                    id="{id}")
+                # Replace values in the workflow dictionary
+                workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1]["tool_state"] = \
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JBROWSE_ORG1]["tool_state"]\
+                    .replace("__MENU_URL_ORG1__", jbrowse_menu_url_org1)
+                workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2]["tool_state"] = \
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JRBOWSE_ORG2]["tool_state"]\
+                    .replace("__MENU_URL_ORG2__", jbrowse_menu_url_org2)
+                # The UNIQUE_ID is specific to a genus_species_strain_sex combination, so every combination should have
+                # its own workflow in galaxy --> TODO: define a naming method for these workflows
+                workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER]["tool_state"] = \
+                    workflow_dict["steps"][constants_phaeo.WF_LOAD_GFF_JB_2_ORG_STEP_JB_TO_CONTAINER]["tool_state"]\
+                    .replace("__DISPLAY_NAME_ORG1__", sp_wf_param_org1.full_name)\
+                    .replace("__UNIQUE_ID_ORG1__", sp_wf_param_org1.species_folder_name)\
+                    .replace("__DISPLAY_NAME_ORG2__", sp_wf_param_org2.full_name)\
+                    .replace("__UNIQUE_ID_ORG2__", sp_wf_param_org2.species_folder_name)
+
+                # Import the workflow in galaxy as a dict
+                sp_wf_param_org1.instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
+
+                # Get its attributes
+                workflow_dict_list = sp_wf_param_org1.instance.workflows.get_workflows(name=workflow_name)
+                # Then get its ID (required to invoke the workflow)
+                workflow_id = workflow_dict_list[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
+                logging.debug("Workflow ID: %s" % workflow_id)
+                # Check if the workflow is found
+                try:
+                    show_workflow = sp_wf_param_org1.instance.workflows.show_workflow(workflow_id=workflow_id)
+                except bioblend.ConnectionError:
+                    logging.warning("Error finding workflow %s" % workflow_name)
+
+                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
+                sp_wf_param_org1.instance.workflows.invoke_workflow(
+                    workflow_id=workflow_id,
+                    history_id=sp_wf_param_org1.history_id,
+                    params=workflow_parameters,
+                    inputs=datamap,
+                    allow_tool_state_corrections=True)
+
+                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance for the jobs state".format(workflow_name))
diff --git a/runWorkflowPhaeo.py b/runWorkflowPhaeo.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3194fcc8b51dc27c009c4a12e982a59d1e94c0f
--- /dev/null
+++ b/runWorkflowPhaeo.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+import json
+import time
+
+import utilities_bioblend
+import speciesData
+import constants
+import constants_phaeo
+
+class OrgWorkflowParam:
+
+    def __init__(self, genus_uppercase, chado_species_name, full_name, species_folder_name,
+                 org_id, history_id, instance):
+        self.genus_uppercase = genus_uppercase
+        self.chado_species_name = chado_species_name
+        self.full_name = full_name
+        self.species_folder_name = species_folder_name
+        self.org_id = org_id
+        self.history_id = history_id
+        self.instance = instance
+
+class RunWorkflow(speciesData.SpeciesData):
+    """
+    Run a workflow in the galaxy instance's history of a given species
+
+    This class is designed for a Phaeoexplorer-specific workflow, but can be adapted to run any workflow,
+    provided the user creates their own workflow in .ga format and adapts the workflow parameters set in the
+    calling script accordingly
+
+    """
+
+    def __init__(self, parameters_dictionary):
+
+        super().__init__(parameters_dictionary)
+        self.history_name = str(self.genus_species)
+
+    def set_galaxy_instance(self, config):
+
+        # Set the instance url attribute --> TODO: the localhost rule in the docker-compose still doesn't work on scratchgmodv1
+        self.instance_url = "http://localhost:{0}/sp/{1}/galaxy/".format(
+            config[constants.CONF_ALL_HTTP_PORT],
+            self.genus_species)
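+        # e.g. http://localhost:80/sp/undaria_pinnatifida/galaxy/ (hypothetical port and genus_species)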
+
+        self.instance = utilities_bioblend.get_galaxy_instance(
+            instance_url=self.instance_url,
+            email=config[constants.CONF_GALAXY_DEFAULT_ADMIN_EMAIL],
+            password=config[constants.CONF_GALAXY_DEFAULT_ADMIN_PASSWORD],
+        )
+
+    def set_history(self):
+        self.history_id = utilities_bioblend.get_history(
+            instance=self.instance,
+            history_name=self.history_name)
+
+    def get_analyses(self):
+
+        get_analyses_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+            instance=self.instance,
+            tool_id=constants_phaeo.GET_ANALYSES_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={},
+            time_sleep=10
+        )
+        analyses_dict_list = json.loads(get_analyses_tool_dataset)
+        return analyses_dict_list
+
+    def add_analysis(self, name, programversion, sourcename):
+
+        add_analysis_tool_dataset = utilities_bioblend.run_tool_and_download_single_output_dataset(
+            instance=self.instance,
+            tool_id=constants_phaeo.ADD_ANALYSIS_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={"name": name,
+                         "program": constants_phaeo.ADD_ANALYSIS_TOOL_PARAM_PROGRAM,
+                         "programversion": programversion,
+                         "sourcename": sourcename,
+                         "date_executed": constants_phaeo.ADD_ANALYSIS_TOOL_PARAM_DATE},
+            time_sleep=10
+        )
+        analysis_dict = json.loads(add_analysis_tool_dataset)
+        analysis_id = str(analysis_dict["analysis_id"])
+
+        return analysis_id
+
+    def sync_analysis(self, analysis_id):
+
+        utilities_bioblend.run_tool(
+            instance=self.instance,
+            tool_id=constants_phaeo.ANALYSIS_SYNC_TOOL_ID,
+            history_id=self.history_id,
+            tool_inputs={"analysis_id": analysis_id})
+
+    def add_analysis_and_sync(self, analyses_dict_list, analysis_name, analysis_programversion, analysis_sourcename):
+        """
+        Add one analysis to Chado database
+        Required for Chado Load Tripal Synchronize workflow (which should be run as the first workflow)
+        Called outside workflow for practical reasons (Chado add doesn't have an input link for analysis or organism)
+        """
+
+        analysis_id = None
+
+        # Look up list of outputs (dictionaries)
+        for analyses_dict in analyses_dict_list:
+            if analyses_dict["name"] == analysis_name:
+                analysis_id = str(analyses_dict["analysis_id"])
+
+        if analysis_id is None:
+            analysis_id = self.add_analysis(
+                name=analysis_name,
+                programversion=analysis_programversion,
+                sourcename=analysis_sourcename
+            )
+
+        # Synchronize analysis in Tripal
+        logging.info("Synchronizing analysis %s in Tripal" % analysis_name)
+        time.sleep(10)
+        self.sync_analysis(analysis_id=analysis_id)
+
+        return analysis_id
+
+    def get_invocation_report(self, workflow_name):
+        """
+        Debugging method for workflows
+
+        Simply logs and returns a report of the previous workflow invocation (execution of a workflow in
+        the instance via the API)
+
+        :param workflow_name:
+        :return:
+        """
+
+        workflow_attributes = self.instance.workflows.get_workflows(name=workflow_name)
+        workflow_id = workflow_attributes[1]["id"]  # Most recently imported workflow (index 1 in the list)
+        invocations = self.instance.workflows.get_invocations(workflow_id=workflow_id)
+        invocation_id = invocations[1]["id"]  # Most recent invocation
+        invocation_report = self.instance.invocations.get_invocation_report(invocation_id=invocation_id)
+
+        logging.debug(invocation_report)
+
+        return invocation_report
diff --git a/run_workflow_phaeoexplorer.py b/run_workflow_phaeoexplorer.py
deleted file mode 100755
index 13e50ff7017b3e3df7710d1b6e256d1fbf57588a..0000000000000000000000000000000000000000
--- a/run_workflow_phaeoexplorer.py
+++ /dev/null
@@ -1,2111 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import bioblend
-import bioblend.galaxy.objects
-import argparse
-import os
-import logging
-import sys
-import json
-import time
-
-from bioblend.galaxy.objects import GalaxyInstance
-from bioblend import galaxy
-
-import utilities
-import speciesData
-
-""" 
-gga_init.py
-
-Usage: $ python3 gga_init.py -i input_example.yml --config [config file] [OPTIONS]
-"""
-
-
-class RunWorkflow(speciesData.SpeciesData):
-    """
-    Run a workflow into the galaxy instance's history of a given species
-
-
-    This script is made to work for a Phaeoexplorer-specific workflow, but can be adapted to run any workflow,
-    provided the user creates their own workflow in a .ga format, and change the set_parameters function
-    to have the correct parameters for their workflow
-
-    """
-
-    def set_get_history(self):
-        """
-        Create or set the working history to the current species one
-
-        :return:
-        """
-        try:
-            histories = self.instance.histories.get_histories(name=str(self.genus_species))
-            self.history_id = histories[0]["id"]
-            logging.debug("History ID set for {0}: {1}".format(self.full_name, self.history_id))
-        except IndexError:
-            logging.info("Creating history for %s" % self.full_name)
-            self.instance.histories.create_history(name=str(self.full_name))
-            histories = self.instance.histories.get_histories(name=str(self.genus_species))
-            self.history_id = histories[0]["id"]
-            logging.debug("History ID set for {0}: {1}".format(self.full_name, self.history_id))
-
-        return self.history_id
-
-    def get_instance_attributes(self):
-        """
-        retrieves instance attributes:
-        - working history ID
-        - libraries ID (there should only be one library!)
-        - datasets IDs
-
-        :return:
-        """
-
-        self.set_get_history()
-
-        logging.debug("History ID: %s" % self.history_id)
-        libraries = self.instance.libraries.get_libraries()  # normally only one library
-        library_id = self.instance.libraries.get_libraries()[0]["id"]  # project data folder/library
-        logging.debug("Library ID: %s" % self.library_id)
-        instance_source_data_folders = self.instance.libraries.get_folders(library_id=library_id)
-
-        return {"history_id": self.history_id, "library_id": library_id}
-
-
-    def connect_to_instance(self):
-        """
-        Test the connection to the galaxy instance for the current organism
-        Exit if we cannot connect to the instance
-
-        """
-
-        # logging.debug("Connecting to the galaxy instance (%s)" % self.instance_url)
-        self.instance = galaxy.GalaxyInstance(url=self.instance_url,
-                                              email=self.config["galaxy_default_admin_email"],
-                                              password=self.config["galaxy_default_admin_password"]
-                                              )
-        self.instance.histories.get_histories()
-
-        try:
-            self.instance.histories.get_histories()
-        except bioblend.ConnectionError:
-            logging.critical("Cannot connect to galaxy instance (%s)" % self.instance_url)
-            sys.exit()
-        else:
-            # logging.debug("Successfully connected to galaxy instance (%s) " % self.instance_url)
-            return 1
-
-
-
-    def return_instance(self):
-
-
-        return self.instance
-        
-
-
-    def install_changesets_revisions_for_individual_tools(self):
-        """
-        This function is used to verify that installed tools called outside workflows have the correct versions and changesets
-        If it finds versions don't match, will install the correct version + changeset in the instance
-        Doesn't do anything if versions match
-        
-        :return:
-        """
-
-        self.connect_to_instance()
-
-        logging.info("Validating installed individual tools versions and changesets")
-
-        # Verify that the add_organism and add_analysis versions are correct in the toolshed
-        add_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/2.3.4+galaxy0")
-        add_analysis_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/2.3.4+galaxy0")
-        get_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0")
-        get_analysis_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/2.3.4+galaxy0")
-
-        # changeset for 2.3.4+galaxy0 has to be manually found because there is no way to get the wanted changeset of a non installed tool via bioblend 
-        # except for workflows (.ga) that already contain the changeset revisions inside the steps ids
-        
-        if get_organism_tool["version"] != "2.3.4+galaxy0":
-            toolshed_dict = get_organism_tool["tool_shed_repository"]
-            logging.warning("Changeset for %s is not installed" % toolshed_dict["name"])
-            changeset_revision = "831229e6cda2"
-            name = toolshed_dict["name"]
-            owner = toolshed_dict["owner"]
-            toolshed = "https://" + toolshed_dict["tool_shed"]
-            logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
-
-            self.instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                               changeset_revision=changeset_revision,
-                                                               install_tool_dependencies=True,
-                                                               install_repository_dependencies=False,
-                                                               install_resolver_dependencies=True)
-
-        if get_analysis_tool["version"] != "2.3.4+galaxy0":
-            toolshed_dict = changeset_revision["tool_shed_repository"]
-            logging.warning("Changeset for %s is not installed" % toolshed_dict["name"])
-            changeset_revision = "a867923f555e"
-            name = toolshed_dict["name"]
-            owner = toolshed_dict["owner"]
-            toolshed = "https://" + toolshed_dict["tool_shed"]
-            logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
-
-            self.instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                               changeset_revision=changeset_revision,
-                                                               install_tool_dependencies=True,
-                                                               install_repository_dependencies=False,
-                                                               install_resolver_dependencies=True)
-
-        if add_organism_tool["version"] != "2.3.4+galaxy0":
-            toolshed_dict = add_organism_tool["tool_shed_repository"]
-            logging.warning("Changeset for %s is not installed" % toolshed_dict["name"])
-            changeset_revision = "1f12b9650028"
-            name = toolshed_dict["name"]
-            owner = toolshed_dict["owner"]
-            toolshed = "https://" + toolshed_dict["tool_shed"]
-            logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
-
-            self.instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                               changeset_revision=changeset_revision,
-                                                               install_tool_dependencies=True,
-                                                               install_repository_dependencies=False,
-                                                               install_resolver_dependencies=True)
-
-        if add_analysis_tool["version"] != "2.3.4+galaxy0":
-            toolshed_dict = add_analysis_tool["tool_shed_repository"]
-            logging.warning("Changeset for %s is not installed" % toolshed_dict["name"])
-            changeset_revision = "10b2b1c70e69"
-            name = toolshed_dict["name"]
-            owner = toolshed_dict["owner"]
-            toolshed = "https://" + toolshed_dict["tool_shed"]
-            logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
-
-            self.instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                               changeset_revision=changeset_revision,
-                                                               install_tool_dependencies=True,
-                                                               install_repository_dependencies=False,
-                                                               install_resolver_dependencies=True)
-
-
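-        # Same check for the Tripal synchronization tools, pinned to version 3.2.1.0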
-        sync_analysis_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0")
-        sync_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0")
-
-        if sync_analysis_tool["version"] != "3.2.1.0":
-            toolshed_dict = sync_analysis_tool["tool_shed_repository"]
-            logging.warning("Changeset for %s is not installed" % toolshed_dict["name"])
-            changeset_revision = "f487ff676088"
-            name = toolshed_dict["name"]
-            owner = toolshed_dict["owner"]
-            toolshed = "https://" + toolshed_dict["tool_shed"]
-            logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
-
-            self.instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                               changeset_revision=changeset_revision,
-                                                               install_tool_dependencies=True,
-                                                               install_repository_dependencies=False,
-                                                               install_resolver_dependencies=True)
-
-        if sync_organism_tool["version"] != "3.2.1.0":
-            toolshed_dict = sync_organism_tool["tool_shed_repository"]
-            logging.warning("Changeset for %s is not installed" % toolshed_dict["name"])
-            changeset_revision = "afd5d92745fb"
-            name = toolshed_dict["name"]
-            owner = toolshed_dict["owner"]
-            toolshed = "https://" + toolshed_dict["tool_shed"]
-            logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
-
-            self.instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                               changeset_revision=changeset_revision,
-                                                               install_tool_dependencies=True,
-                                                               install_repository_dependencies=False,
-                                                               install_resolver_dependencies=True)
-
-
-        logging.info("Success: individual tool versions and changesets validated")
-
-
-
-    def tripal_synchronize_organism_analyses(self):
-        """
-        """
-        show_tool_tripal_sync = self.instance.tools.show_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0", io_details=True)
-        org_sync = "toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0"
-        org_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0",
-                                                history_id=self.history_id,
-                                                tool_inputs={"organism_id": "2"})
-        org_sync_job_out = org_sync["outputs"]
-
-
-    def add_organism_ogs_genome_analyses(self):
-        """
-        Add OGS and genome vX analyses to Chado database
-        Required for Chado Load Tripal Synchronize workflow (which should be ran as the first workflow)
-        Called outside workflow for practical reasons (Chado add doesn't have an input link for analysis or organism)
-
-        :return:
-
-        """
-
-        self.connect_to_instance()
-        self.set_get_history()
-
-        tool_version = "2.3.4+galaxy0"
-
-        get_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0")
-
-        get_organisms = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/%s" % tool_version,
-            history_id=self.history_id,
-            tool_inputs={})
-
-        time.sleep(10)  # Ensure the tool has had time to complete
-        org_outputs = get_organisms["outputs"]  # Outputs from the get_organism tool
-        org_job_out_id = org_outputs[0]["id"]  # ID of the get_organism output dataset (list of dicts)
-        org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)  # Download the dataset
-        org_output = json.loads(org_json_output)  # Turn the dataset into a list for parsing
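-        # org_output is expected to be a list of organism records, e.g.
-        # [{"organism_id": 1, "genus": "...", "species": "...", ...}, ...]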
-
-        org_id = None
-
-        # Look up list of outputs (dictionaries)
-        for organism_output_dict in org_output:
-            if organism_output_dict["genus"] == self.genus and organism_output_dict["species"] == "{0} {1}".format(self.species, self.sex):
-                correct_organism_id = str(organism_output_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
-                org_id = str(correct_organism_id)
-
-
-        if org_id is None:
-            # Use the abbreviation as the common name if none was provided
-            common_name = self.common if self.common else self.abbreviation
-            add_org_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"abbr": self.abbreviation,
-                             "genus": self.genus_uppercase,
-                             "species": self.chado_species_name,
-                             "common": common_name})
-            org_job_out_id = add_org_job["outputs"][0]["id"]
-            org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)
-            org_output = json.loads(org_json_output)
-            org_id = str(org_output["organism_id"])  # id needs to be a str to be recognized by chado tools
-
-        # Synchronize newly added organism in Tripal
-        logging.info("Synchronizing organism %s in Tripal" % self.full_name)
-        time.sleep(60)
-        org_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0",
-                                                history_id=self.history_id,
-                                                tool_inputs={"organism_id": org_id})
-
-
-        # Analyses (genome + OGS)
-        get_analyses = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/%s" % tool_version,
-            history_id=self.history_id,
-            tool_inputs={})
-
-        time.sleep(10)
-        analysis_outputs = get_analyses["outputs"]
-        analysis_job_out_id = analysis_outputs[0]["id"]
-        analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-        analysis_output = json.loads(analysis_json_output)
-
-        ogs_analysis_id = None
-        genome_analysis_id = None
-
-        # Look up list of outputs (dictionaries)
-        for analysis_output_dict in analysis_output:
-            if analysis_output_dict["name"] == self.full_name_lowercase + " OGS" + self.ogs_version:
-                ogs_analysis_id = str(analysis_output_dict["analysis_id"])
-            if analysis_output_dict["name"] == self.full_name_lowercase + " genome v" + self.genome_version:
-                genome_analysis_id = str(analysis_output_dict["analysis_id"])
-
-
-        if ogs_analysis_id is None:
-            add_ogs_analysis_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"name": self.full_name_lowercase + " OGS" + self.ogs_version,
-                             "program": "Performed by Genoscope",
-                             "programversion": str(self.sex + " OGS" + self.ogs_version),
-                             "sourcename": "Genoscope",
-                             "date_executed": self.date})
-            analysis_outputs = add_ogs_analysis_job["outputs"]
-            analysis_job_out_id = analysis_outputs[0]["id"]
-            analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-            analysis_output = json.loads(analysis_json_output)
-            ogs_analysis_id = str(analysis_output["analysis_id"])
-            
-        # Synchronize OGS analysis in Tripal
-        logging.info("Synchronizing OGS%s analysis in Tripal" % self.ogs_version)
-        time.sleep(60)
-        ogs_analysis_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
-                                                         history_id=self.history_id,
-                                                         tool_inputs={"analysis_id": ogs_analysis_id})
-                    
-        if genome_analysis_id is None:
-            add_genome_analysis_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"name": self.full_name_lowercase + " genome v" + self.genome_version,
-                             "program": "Performed by Genoscope",
-                             "programversion": str(self.sex + " genome v" + self.genome_version),
-                             "sourcename": "Genoscope",
-                             "date_executed": self.date})
-            analysis_outputs = add_genome_analysis_job["outputs"]
-            analysis_job_out_id = analysis_outputs[0]["id"]
-            analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-            analysis_output = json.loads(analysis_json_output)
-            genome_analysis_id = str(analysis_output["analysis_id"])
-    
-        # Synchronize genome analysis in Tripal
-        logging.info("Synchronizing genome v%s analysis in Tripal" % self.genome_version)
-        time.sleep(60)
-        genome_analysis_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
-                                                            history_id=self.history_id,
-                                                            tool_inputs={"analysis_id": genome_analysis_id})
-
-        # print({"org_id": org_id, "genome_analysis_id": genome_analysis_id, "ogs_analysis_id": ogs_analysis_id})
-        return({"org_id": org_id, "genome_analysis_id": genome_analysis_id, "ogs_analysis_id": ogs_analysis_id})
-
-
-    def add_organism_blastp_analysis(self):
-        """
-        Add OGS and genome vX analyses to Chado database
-        Required for Chado Load Tripal Synchronize workflow (which should be ran as the first workflow)
-        Called outside workflow for practical reasons (Chado add doesn't have an input link for analysis or organism)
-
-        :return:
-
-        """
-
-        self.connect_to_instance()
-        self.set_get_history()
-
-        tool_version = "2.3.4+galaxy0"
-
-        get_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0")
-
-        get_organisms = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/%s" % tool_version,
-            history_id=self.history_id,
-            tool_inputs={})
-
-        time.sleep(10)  # Ensure the tool has had time to complete
-        org_outputs = get_organisms["outputs"]  # Outputs from the get_organism tool
-        org_job_out_id = org_outputs[0]["id"]  # ID of the get_organism output dataset (list of dicts)
-        org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)  # Download the dataset
-        org_output = json.loads(org_json_output)  # Turn the dataset into a list for parsing
-
-        org_id = None
-
-        # Look up list of outputs (dictionaries)
-        for organism_output_dict in org_output:
-            if organism_output_dict["genus"] == self.genus and organism_output_dict["species"] == "{0} {1}".format(self.species, self.sex):
-                correct_organism_id = str(organism_output_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
-                org_id = str(correct_organism_id)
-
-
-        if org_id is None:
-            # Use the abbreviation as the common name if none was provided
-            common_name = self.common if self.common else self.abbreviation
-            add_org_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"abbr": self.abbreviation,
-                             "genus": self.genus_uppercase,
-                             "species": self.chado_species_name,
-                             "common": common_name})
-            org_job_out_id = add_org_job["outputs"][0]["id"]
-            org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)
-            org_output = json.loads(org_json_output)
-            org_id = str(org_output["organism_id"])  # id needs to be a str to be recognized by chado tools
-
-            # Synchronize newly added organism in Tripal
-            logging.info("Synchronizing organism %s in Tripal" % self.full_name)
-            time.sleep(60)
-            org_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0",
-                                                    history_id=self.history_id,
-                                                    tool_inputs={"organism_id": org_id})
-
-
-        get_analyses = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/%s" % tool_version,
-            history_id=self.history_id,
-            tool_inputs={})
-
-        time.sleep(10)
-        analysis_outputs = get_analyses["outputs"]
-        analysis_job_out_id = analysis_outputs[0]["id"]
-        analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-        analysis_output = json.loads(analysis_json_output)
-
-        blastp_analysis_id = None
-
-        # Look up list of outputs (dictionaries)
-        for analysis_output_dict in analysis_output:
-            if analysis_output_dict["name"] == "Diamond on " + self.full_name_lowercase + " OGS" + self.ogs_version:
-                blastp_analysis_id = str(analysis_output_dict["analysis_id"])
-
-
-        if blastp_analysis_id is None:
-            add_blast_analysis_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"name": "Diamond on " + self.full_name_lowercase + " OGS" + self.ogs_version,
-                             "program": "Performed by Genoscope",
-                             "programversion": str(self.sex + " OGS" + self.ogs_version),
-                             "sourcename": "Genoscope",
-                             "date_executed": self.date})
-            analysis_outputs = add_blast_analysis_job["outputs"]
-            analysis_job_out_id = analysis_outputs[0]["id"]
-            analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-            analysis_output = json.loads(analysis_json_output)
-            blastp_analysis_id = str(analysis_output["analysis_id"])
-
-        # Synchronize blastp analysis
-        logging.info("Synchronizing Diamond blastp OGS%s analysis in Tripal" % self.ogs_version)
-        time.sleep(60)
-        blastp_analysis_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
-                                                            history_id=self.history_id,
-                                                            tool_inputs={"analysis_id": blastp_analysis_id})
-
-        # print({"org_id": org_id, "blastp_analysis_id": blastp_analysis_id})
-        return({"org_id": org_id, "blastp_analysis_id": blastp_analysis_id})
-
-    def add_organism_interproscan_analysis(self):
-        """
-        Add OGS and genome vX analyses to Chado database
-        Required for Chado Load Tripal Synchronize workflow (which should be ran as the first workflow)
-        Called outside workflow for practical reasons (Chado add doesn't have an input link for analysis or organism)
-
-        :return:
-
-        """
-
-        self.connect_to_instance()
-        self.set_get_history()
-
-        tool_version = "2.3.4+galaxy0"
-
-        get_organism_tool = self.instance.tools.show_tool("toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0")
-
-        get_organisms = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/%s" % tool_version,
-            history_id=self.history_id,
-            tool_inputs={})
-
-        time.sleep(10)  # Ensure the tool has had time to complete
-        org_outputs = get_organisms["outputs"]  # Outputs from the get_organism tool
-        org_job_out_id = org_outputs[0]["id"]  # ID of the get_organism output dataset (list of dicts)
-        org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)  # Download the dataset
-        org_output = json.loads(org_json_output)  # Turn the dataset into a list for parsing
-
-        org_id = None
-
-        # Look up list of outputs (dictionaries)
-        for organism_output_dict in org_output:
-            if organism_output_dict["genus"] == self.genus and organism_output_dict["species"] == "{0} {1}".format(self.species, self.sex):
-                correct_organism_id = str(organism_output_dict["organism_id"])  # id needs to be a str to be recognized by chado tools
-                org_id = str(correct_organism_id)
-
-
-        if org_id is None:
-            # Use the abbreviation as the common name if none was provided
-            common_name = self.common if self.common else self.abbreviation
-            add_org_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"abbr": self.abbreviation,
-                             "genus": self.genus_uppercase,
-                             "species": self.chado_species_name,
-                             "common": common_name})
-            org_job_out_id = add_org_job["outputs"][0]["id"]
-            org_json_output = self.instance.datasets.download_dataset(dataset_id=org_job_out_id)
-            org_output = json.loads(org_json_output)
-            org_id = str(org_output["organism_id"])  # id needs to be a str to be recognized by chado tools
-
-            # Synchronize newly added organism in Tripal
-            logging.info("Synchronizing organism %s in Tripal" % self.full_name)
-            time.sleep(60)
-            org_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_organism_sync/organism_sync/3.2.1.0",
-                                                    history_id=self.history_id,
-                                                    tool_inputs={"organism_id": org_id})
-
-
-        get_analyses = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/%s" % tool_version,
-            history_id=self.history_id,
-            tool_inputs={})
-
-        time.sleep(10)
-        analysis_outputs = get_analyses["outputs"]
-        analysis_job_out_id = analysis_outputs[0]["id"]
-        analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-        analysis_output = json.loads(analysis_json_output)
-
-        interpro_analysis_id = None
-
-        # Look up list of outputs (dictionaries)
-        for analysis_output_dict in analysis_output:
-            if analysis_output_dict["name"] == "Interproscan on " + self.full_name_lowercase + " OGS" + self.ogs_version:
-                interpro_analysis_id = str(analysis_output_dict["analysis_id"])
-
-
-        if interpro_analysis_id is None:
-            add_interproscan_analysis_job = self.instance.tools.run_tool(
-                tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_add_analysis/analysis_add_analysis/%s" % tool_version,
-                history_id=self.history_id,
-                tool_inputs={"name": "Interproscan on " + self.full_name_lowercase + " OGS" + self.ogs_version,
-                             "program": "Performed by Genoscope",
-                             "programversion": str(self.sex + " OGS" + self.ogs_version),
-                             "sourcename": "Genoscope",
-                             "date_executed": self.date})
-            analysis_outputs = add_interproscan_analysis_job["outputs"]
-            analysis_job_out_id = analysis_outputs[0]["id"]
-            analysis_json_output = self.instance.datasets.download_dataset(dataset_id=analysis_job_out_id)
-            analysis_output = json.loads(analysis_json_output)
-            interpro_analysis_id = str(analysis_output["analysis_id"])
-
-        # Synchronize InterProScan analysis
-        logging.info("Synchronizing InterProScan OGS%s analysis in Tripal" % self.ogs_version)
-        time.sleep(60)
-        interproscan_analysis_sync = self.instance.tools.run_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/tripal_analysis_sync/analysis_sync/3.2.1.0",
-                                                            history_id=self.history_id,
-                                                            tool_inputs={"analysis_id": interpro_analysis_id})
-
-        # print({"org_id": org_id, "interpro_analysis_id": interpro_analysis_id})
-        return({"org_id": org_id, "interpro_analysis_id": interpro_analysis_id})
-
-
-    def get_interpro_analysis_id(self):
-        """
-        """
-
-        # Get interpro ID
-        interpro_analysis = self.instance.tools.run_tool(
-            tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_analysis_get_analyses/analysis_get_analyses/2.3.4+galaxy0",
-            history_id=self.history_id,
-            tool_inputs={"name": "InterproScan on OGS%s" % self.ogs_version})
-        interpro_analysis_job_out = interpro_analysis["outputs"][0]["id"]
-        interpro_analysis_json_output = self.instance.datasets.download_dataset(dataset_id=interpro_analysis_job_out)
-        try:
-            interpro_analysis_output = json.loads(interpro_analysis_json_output)[0]
-            self.interpro_analysis_id = str(interpro_analysis_output["analysis_id"])
-        except IndexError as exc:
-            logging.critical("No matching InterproScan analysis exists in the instance's chado database")
-            sys.exit(exc)
-
-        return self.interpro_analysis_id
-
-
-    def get_invocation_report(self, workflow_name):
-        """
-        Debugging method for workflows
-
-        Simply logs and returns a report of the previous workflow invocation (execution of a workflow in
-        the instance via the API)
-
-        :param workflow_name: name of the workflow whose most recent invocation should be reported
-        :return: the invocation report (dict)
-        """
-
-        workflow_attributes = self.instance.workflows.get_workflows(name=workflow_name)
-        workflow_id = workflow_attributes[1]["id"]  # Most recently imported workflow (index 1 in the list)
-        invocations = self.instance.workflows.get_invocations(workflow_id=workflow_id)
-        invocation_id = invocations[1]["id"]  # Most recent invocation
-        invocation_report = self.instance.invocations.get_invocation_report(invocation_id=invocation_id)
-
-        logging.debug(invocation_report)
-
-        return invocation_report
-
-
-    def import_datasets_into_history(self):
-        """
-        Find datasets in a library, get their IDs and import them into the current history if they are not there already
-
-        :return: dict of history dataset (hda) IDs for the genome, gff, transcripts, proteins, blastp and interproscan files
-        """
-
-        # Instantiate a GalaxyInstance to access the data library
-        gio = GalaxyInstance(url=self.instance_url,
-                             email=self.config["galaxy_default_admin_email"],
-                             password=self.config["galaxy_default_admin_password"])
-
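-        # get_previews(name=...) returns a list of matching library previews; keep the id of the first match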
-        prj_lib = gio.libraries.get_previews(name="Project Data")
-        library_id = prj_lib[0].id
-
-        instance_source_data_folders = self.instance.libraries.get_folders(library_id=str(library_id))
-
-        folders_ids = {}
-
-        # Loop over the folders in the library and map folder names to their IDs
-        for i in instance_source_data_folders:
-            folders_ids[i["name"]] = i["id"]
-
-        # Iterating over the folders to find datasets and map datasets to their IDs
-        for k, v in folders_ids.items():
-            if k == "/genome/{0}/v{1}".format(self.species_folder_name, self.genome_version):
-                sub_folder_content = self.instance.folders.show_folder(folder_id=v, contents=True)
-                for k2, v2 in sub_folder_content.items():
-                    for e in v2:
-                        if type(e) == dict:
-                            if e["name"].endswith(".fasta"):
-                                self.datasets["genome_file"] = e["ldda_id"]
-                                self.datasets_name["genome_file"] = e["name"]
-
-            if k == "/annotation/{0}/OGS{1}".format(self.species_folder_name, self.ogs_version):
-                sub_folder_content = self.instance.folders.show_folder(folder_id=v, contents=True)
-                for k2, v2 in sub_folder_content.items():
-                    for e in v2:
-                        if type(e) == dict:
-                            if "transcripts" in e["name"]:
-                                self.datasets["transcripts_file"] = e["ldda_id"]
-                                self.datasets_name["transcripts_file"] = e["name"]
-                            elif "proteins" in e["name"]:
-                                self.datasets["proteins_file"] = e["ldda_id"]
-                                self.datasets_name["proteins_file"] = e["name"]
-                            elif "gff" in e["name"]:
-                                self.datasets["gff_file"] = e["ldda_id"]
-                                self.datasets_name["gff_file"] = e["name"]
-                            elif "interpro" in e["name"]:
-                                self.datasets["interproscan_file"] = e["ldda_id"]
-                                self.datasets_name["interproscan_file"] = e["name"]
-                            elif "blastp" in e["name"]:
-                                self.datasets["blastp_file"] = e["ldda_id"]
-                                self.datasets_name["blastp_file"] = e["name"]
-
-
-        history_datasets_li = self.instance.datasets.get_datasets()
-        genome_hda_id, gff_hda_id, transcripts_hda_id, proteins_hda_id, blastp_hda_id, interproscan_hda_id = None, None, None, None, None, None
-
-        # Finding datasets in history (matching datasets names)
-        for dataset in history_datasets_li:
-            dataset_name = dataset["name"]
-            dataset_id = dataset["id"]
-            if dataset_name == "{0}_v{1}.fasta".format(self.dataset_prefix, self.genome_version):
-                genome_hda_id = dataset_id
-            if dataset_name == "{0}_OGS{1}_{2}.gff".format(self.dataset_prefix, self.ogs_version, self.date):
-                gff_hda_id = dataset_id
-            if dataset_name == "{0}_OGS{1}_transcripts.fasta".format(self.dataset_prefix, self.ogs_version):
-                transcripts_hda_id = dataset_id
-            if dataset_name == "{0}_OGS{1}_proteins.fasta".format(self.dataset_prefix, self.ogs_version):
-                proteins_hda_id = dataset_id
-            if dataset_name == "{0}_OGS{1}_blastp.xml".format(self.dataset_prefix, self.ogs_version):
-                blastp_hda_id = dataset_id
-            if dataset_name == "{0}_OGS{1}_interproscan.xml".format(self.dataset_prefix, self.ogs_version):
-                interproscan_hda_id = dataset_id
-
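-        # Any hda id still set to None at this point was not found in the history and is imported from the library below (when available)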
-                    
-        # Import each dataset into history if it is not imported
-        logging.debug("Uploading datasets into history %s" % self.history_id)
-
-        if genome_hda_id is None:
-            genome_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["genome_file"])
-            genome_hda_id = genome_dataset_upload["id"]
-        if gff_hda_id is None:
-            gff_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["gff_file"])
-            gff_hda_id = gff_dataset_upload["id"]
-        if transcripts_hda_id is None:
-            transcripts_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["transcripts_file"])
-            transcripts_hda_id = transcripts_dataset_upload["id"]
-        if proteins_hda_id is None:
-            proteins_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["proteins_file"])
-            proteins_hda_id = proteins_dataset_upload["id"]
-        if interproscan_hda_id is None:
-            try:
-                interproscan_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["interproscan_file"])
-                interproscan_hda_id = interproscan_dataset_upload["id"]
-            except Exception as exc:
-                logging.debug("Interproscan file not found in library (history: {0})".format(self.history_id))
-        if blastp_hda_id is None:
-            try:
-                blastp_dataset_upload = self.instance.histories.upload_dataset_from_library(history_id=self.history_id, lib_dataset_id=self.datasets["blastp_file"])
-                blastp_hda_id = blastp_dataset_upload["id"]
-            except Exception as exc:
-                logging.debug("blastp file not found in library (history: {0})".format(self.history_id))
-
-        # logging.debug("History dataset IDs (hda_id) for %s:" % self.full_name)
-        # logging.debug({"genome_hda_id": genome_hda_id,
-        #         "gff_hda_id": gff_hda_id,
-        #         "transcripts_hda_id": transcripts_hda_id,
-        #         "proteins_hda_id": proteins_hda_id,
-        #         "blastp_hda_id": blastp_hda_id,
-        #         "interproscan_hda_id": interproscan_hda_id})
-
-        # Return a dict made of the hda ids
-        return {"genome_hda_id": genome_hda_id, 
-                "gff_hda_id": gff_hda_id, 
-                "transcripts_hda_id": transcripts_hda_id, 
-                "proteins_hda_id": proteins_hda_id, 
-                "blastp_hda_id": blastp_hda_id,
-                "interproscan_hda_id": interproscan_hda_id}
-
-
-    def get_datasets_hda_ids(self):
-        """
-        Get the hda IDs of the datasets imported into a history
-
-        As some tools will not work with the input datasets' ldda IDs, we need to retrieve the IDs of the datasets
-        imported into a history
-
-        :return: dict of hda IDs for the imported datasets, plus the updated list of already imported dataset IDs
-        """
-
-        # List of all datasets in the instance (including outputs from jobs)
-        # "limit" and "offset" options *may* be used to restrict search to specific datasets but since
-        # there is no way to know which imported datasets are the correct ones depending on history content
-        # it's not currently used
-        history_datasets_li = self.instance.datasets.get_datasets()
-
-        genome_dataset_hda_id, gff_dataset_hda_id, transcripts_dataset_hda_id, proteins_datasets_hda_id = None, None, None, None
-        interproscan_dataset_hda_id, blastp_dataset_hda_id = None, None
-
-        # Match files imported in history names vs library datasets names to assign their respective hda_id
-        for dataset_dict in history_datasets_li:
-            if dataset_dict["history_id"] == self.history_id:
-                if dataset_dict["name"] == self.datasets_name["genome_file"] and dataset_dict["id"] not in imported_datasets_ids:
-                    genome_dataset_hda_id = dataset_dict["id"]
-                elif dataset_dict["name"] == self.datasets_name["proteins_file"] and dataset_dict["id"] not in imported_datasets_ids:
-                    proteins_datasets_hda_id = dataset_dict["id"]
-                elif dataset_dict["name"] == self.datasets_name["transcripts_file"] and dataset_dict["id"] not in imported_datasets_ids:
-                    transcripts_dataset_hda_id = dataset_dict["id"]
-                elif dataset_dict["name"] == self.datasets_name["gff_file"] and dataset_dict["id"] not in imported_datasets_ids:
-                    gff_dataset_hda_id = dataset_dict["id"]
-                if "interproscan_file" in self.datasets_name.keys():
-                    if dataset_dict["name"] == self.datasets_name["interproscan_file"] and dataset_dict["id"] not in imported_datasets_ids:
-                        interproscan_dataset_hda_id = dataset_dict["id"]
-                if "blastp_file" in self.datasets_name.keys():
-                    if dataset_dict["name"] == self.datasets_name["blastp_file"] and dataset_dict["id"] not in imported_datasets_ids:
-                        blastp_dataset_hda_id = dataset_dict["id"]
-                    
-        logging.debug("Genome dataset hda id: %s" % genome_dataset_hda_id)
-        logging.debug("Proteins dataset hda ID: %s" % proteins_datasets_hda_id)
-        logging.debug("Transcripts dataset hda ID: %s" % transcripts_dataset_hda_id)
-        logging.debug("GFF dataset hda ID: %s" % gff_dataset_hda_id)
-        logging.debug("InterproScan dataset hda ID: %s" % interproscan_dataset_hda_id)
-        logging.debug("Blastp Diamond dataset hda ID: %s" % blastp_dataset_hda_id)
-
-        # Add datasets IDs to already imported IDs (so we don't assign all the wrong IDs to the next organism if there is one)
-        imported_datasets_ids.append(genome_dataset_hda_id)
-        imported_datasets_ids.append(transcripts_dataset_hda_id)
-        imported_datasets_ids.append(proteins_datasets_hda_id)
-        imported_datasets_ids.append(gff_dataset_hda_id)
-        imported_datasets_ids.append(interproscan_dataset_hda_id)
-        imported_datasets_ids.append(blastp_dataset_hda_id)
-
-        # Return a dict made of the hda ids
-        return {"genome_hda_id": genome_dataset_hda_id, "transcripts_hda_id": transcripts_dataset_hda_id,
-                "proteins_hda_id": proteins_datasets_hda_id, "gff_hda_id": gff_dataset_hda_id,
-                "interproscan_hda_id": interproscan_dataset_hda_id,
-                "blastp_hda_id": blastp_dataset_hda_id,
-                "imported_datasets_ids": imported_datasets_ids}
-
-
-def run_workflow(workflow_path, workflow_parameters, datamap, config, input_species_number):
-    """
-    Run a workflow in galaxy
-    Requires the .ga file to be loaded as a dictionary (optionally could be uploaded as a raw file)
-
-    :param workflow_name:
-    :param workflow_parameters:
-    :param datamap:
-    :return:
-    """
-
-    logging.info("Importing workflow %s" % str(workflow_path))
-
-    # Load the workflow file (.ga) in a buffer
-    with open(workflow_path, 'r') as ga_in_file:
-
-        # Then store the decoded json dictionary
-        workflow_dict = json.load(ga_in_file)
-
-        # For the Jbrowse workflow, we unfortunately have to edit the parameters manually instead of setting them
-        # as runtime values: using runtime parameters makes the tool throw an internal critical error ("replace not found")
-        # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
-        if "jbrowse_menu_url" not in config.keys():
-            jbrowse_menu_url = "https://{hostname}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(hostname=self.config["hostname"], genus_sp=self.genus_species, Genus=self.genus_uppercase, species=self.species, id="{id}")
-        else:
-            jbrowse_menu_url = config["jbrowse_menu_url"]
-        if workflow_name == "Jbrowse":
-            workflow_dict["steps"]["2"]["tool_state"] = workflow_dict["steps"]["2"]["tool_state"].replace("__MENU_URL__", jbrowse_menu_url)
-            # The UNIQUE_ID is specific to a combination genus_species_strain_sex so every combination should have its unique workflow
-            # in galaxy --> define a naming method for these workflows
-            workflow_dict["steps"]["3"]["tool_state"] = workflow_dict["steps"]["3"]["tool_state"].replace("__FULL_NAME__", self.full_name).replace("__UNIQUE_ID__", self.species_folder_name)
-
-        # Import the workflow in galaxy as a dict
-        self.instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-
-        # Get its attributes
-        workflow_attributes = self.instance.workflows.get_workflows(name=workflow_name)
-        # Then get its ID (required to invoke the workflow)
-        workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-        show_workflow = self.instance.workflows.show_workflow(workflow_id=workflow_id)
-        # Check if the workflow is found
-        try:
-            logging.debug("Workflow ID: %s" % workflow_id)
-        except bioblend.ConnectionError:
-            logging.warning("Error retrieving workflow attributes for workflow %s" % workflow_name)
-
-        # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-        self.instance.workflows.invoke_workflow(workflow_id=workflow_id,
-                                                history_id=self.history_id,
-                                                params=workflow_parameters,
-                                                inputs=datamap,
-                                                allow_tool_state_corrections=True)
-
-        logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, self.instance_url))
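-        # A possible way to poll the invocation state instead of checking manually in the web interface (sketch, not used here):
-        #   invocations = self.instance.workflows.get_invocations(workflow_id=workflow_id)
-        #   state = self.instance.invocations.show_invocation(invocations[0]["id"])["state"]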
-
-
-
-
-def create_sp_workflow_dict(sp_dict, main_dir, config, workflow_type):
-    """
-    """
-
-    sp_workflow_dict = {}
-    run_workflow_for_current_organism = RunWorkflow(parameters_dictionary=sp_dict)
-
-    # Verifying the galaxy container is running
-    if utilities.check_galaxy_state(genus_lowercase=run_workflow_for_current_organism.genus_lowercase,
-                                    species=run_workflow_for_current_organism.species,
-                                    script_dir=run_workflow_for_current_organism.script_dir):
-
-        # Setting some of the instance attributes
-        run_workflow_for_current_organism.main_dir = main_dir
-        run_workflow_for_current_organism.species_dir = os.path.join(run_workflow_for_current_organism.main_dir,
-                                                                     run_workflow_for_current_organism.genus_species +
-                                                                     "/")
-
-        # Parse the config yaml file
-        run_workflow_for_current_organism.config = config
-        # Set the instance url attribute --> TODO: the localhost rule in the docker-compose still doesn't work on scratchgmodv1
-        run_workflow_for_current_organism.instance_url = "http://localhost:{0}/sp/{1}_{2}/galaxy/".format( 
-            run_workflow_for_current_organism.config["http_port"],
-            run_workflow_for_current_organism.genus_lowercase,
-            run_workflow_for_current_organism.species)
-
-
-        if workflow_type == "load_fasta_gff_jbrowse":
-            run_workflow_for_current_organism.connect_to_instance()
-
-            history_id = run_workflow_for_current_organism.set_get_history()
-
-            run_workflow_for_current_organism.install_changesets_revisions_for_individual_tools()
-            ids = run_workflow_for_current_organism.add_organism_ogs_genome_analyses()
-
-            org_id = None
-            genome_analysis_id = None
-            ogs_analysis_id = None
-            org_id = ids["org_id"]
-            genome_analysis_id = ids["genome_analysis_id"]
-            ogs_analysis_id = ids["ogs_analysis_id"]
-            instance_attributes = run_workflow_for_current_organism.get_instance_attributes()
-            hda_ids = run_workflow_for_current_organism.import_datasets_into_history()
-
-            strain_sex = "{0}_{1}".format(run_workflow_for_current_organism.strain, run_workflow_for_current_organism.sex)
-            genus_species = run_workflow_for_current_organism.genus_species
-
-            # Create the dictionary holding all attributes needed to connect to the galaxy instance
-            attributes = {"genus": run_workflow_for_current_organism.genus,
-                          "species": run_workflow_for_current_organism.species,
-                          "genus_species": run_workflow_for_current_organism.genus_species,
-                          "full_name": run_workflow_for_current_organism.full_name,
-                          "species_folder_name": run_workflow_for_current_organism.species_folder_name,
-                          "sex": run_workflow_for_current_organism.sex,
-                          "strain": run_workflow_for_current_organism.strain,
-                          "org_id": org_id,
-                          "genome_analysis_id": genome_analysis_id,
-                          "ogs_analysis_id": ogs_analysis_id,
-                          "instance_attributes": instance_attributes,
-                          "hda_ids": hda_ids,
-                          "history_id": history_id,
-                          "instance": run_workflow_for_current_organism.instance,
-                          "instance_url": run_workflow_for_current_organism.instance_url,
-                          "email": config["galaxy_default_admin_email"],
-                          "password": config["galaxy_default_admin_password"]}
-
-            sp_workflow_dict[genus_species] = {strain_sex: attributes}
-
-        if workflow_type == "blast":
-            run_workflow_for_current_organism.connect_to_instance()
-
-            history_id = run_workflow_for_current_organism.set_get_history()
-
-            run_workflow_for_current_organism.install_changesets_revisions_for_individual_tools()
-            ids = run_workflow_for_current_organism.add_organism_blastp_analysis()
-
-            org_id = None
-            org_id = ids["org_id"]
-            blastp_analysis_id = None
-            blastp_analysis_id = ids["blastp_analysis_id"]
-            instance_attributes = run_workflow_for_current_organism.get_instance_attributes()
-            hda_ids = run_workflow_for_current_organism.import_datasets_into_history()
-
-            strain_sex = "{0}_{1}".format(run_workflow_for_current_organism.strain, run_workflow_for_current_organism.sex)
-            genus_species = run_workflow_for_current_organism.genus_species
-
-            # Create the dictionary holding all attributes needed to connect to the galaxy instance
-            attributes = {"genus": run_workflow_for_current_organism.genus,
-                          "species": run_workflow_for_current_organism.species,
-                          "genus_species": run_workflow_for_current_organism.genus_species,
-                          "full_name": run_workflow_for_current_organism.full_name,
-                          "species_folder_name": run_workflow_for_current_organism.species_folder_name,
-                          "sex": run_workflow_for_current_organism.sex,
-                          "strain": run_workflow_for_current_organism.strain,
-                          "org_id": org_id,
-                          "blastp_analysis_id": blastp_analysis_id,
-                          "instance_attributes": instance_attributes,
-                          "hda_ids": hda_ids,
-                          "history_id": history_id,
-                          "instance": run_workflow_for_current_organism.instance,
-                          "instance_url": run_workflow_for_current_organism.instance_url,
-                          "email": config["galaxy_default_admin_email"],
-                          "password": config["galaxy_default_admin_password"]}
-
-            sp_workflow_dict[genus_species] = {strain_sex: attributes}
-
-        if workflow_type == "interpro":
-            run_workflow_for_current_organism.connect_to_instance()
-
-            history_id = run_workflow_for_current_organism.set_get_history()
-
-            run_workflow_for_current_organism.install_changesets_revisions_for_individual_tools()
-            ids = run_workflow_for_current_organism.add_organism_interproscan_analysis()
-
-            org_id = None
-            org_id = ids["org_id"]
-            interpro_analysis_id = None
-            interpro_analysis_id = ids["interpro_analysis_id"]
-            instance_attributes = run_workflow_for_current_organism.get_instance_attributes()
-            hda_ids = run_workflow_for_current_organism.import_datasets_into_history()
-
-            strain_sex = "{0}_{1}".format(run_workflow_for_current_organism.strain, run_workflow_for_current_organism.sex)
-            genus_species = run_workflow_for_current_organism.genus_species
-
-            # Create the dictionary holding all attributes needed to connect to the galaxy instance
-            attributes = {"genus": run_workflow_for_current_organism.genus,
-                          "species": run_workflow_for_current_organism.species,
-                          "genus_species": run_workflow_for_current_organism.genus_species,
-                          "full_name": run_workflow_for_current_organism.full_name,
-                          "species_folder_name": run_workflow_for_current_organism.species_folder_name,
-                          "sex": run_workflow_for_current_organism.sex,
-                          "strain": run_workflow_for_current_organism.strain,
-                          "org_id": org_id,
-                          "interpro_analysis_id": interpro_analysis_id,
-                          "instance_attributes": instance_attributes,
-                          "hda_ids": hda_ids,
-                          "history_id": history_id,
-                          "instance": run_workflow_for_current_organism.instance,
-                          "instance_url": run_workflow_for_current_organism.instance_url,
-                          "email": config["galaxy_default_admin_email"],
-                          "password": config["galaxy_default_admin_password"]}
-
-            sp_workflow_dict[genus_species] = {strain_sex: attributes}
-
-        return sp_workflow_dict
-    else:
-        logging.critical("The galaxy container for %s is not ready yet!" % run_workflow_for_current_organism.full_name)
-        sys.exit()
-
-
-
-def install_changesets_revisions_from_workflow(instance, workflow_path):
-    """
-    Read a .ga file to extract the information about the different tools it calls.
-    Check that every tool is installed (via a "show_tool" request).
-    If a tool is not installed (i.e. the versions don't match), log a warning and install the required changeset (matching the tool version).
-    Does nothing if the versions match.
-
-    :param instance: bioblend GalaxyInstance where the tools should be installed
-    :param workflow_path: path to the .ga workflow file
-    :return:
-    """
-
-    logging.info("Validating that installed tools versions and changesets match workflow versions")
-
-    # Load the workflow file (.ga) in a buffer
-    with open(workflow_path, 'r') as ga_in_file:
-
-        # Then store the decoded json dictionary
-        workflow_dict = json.load(ga_in_file)
-
-        # Look up every "step_id" looking for tools
-        for k, v in workflow_dict["steps"].items():
-            if v["tool_id"]:
-                # Get the descriptive dictionary of the installed tool (using the tool id in the workflow)
-                show_tool = instance.tools.show_tool(v["tool_id"])
-                # Check if an installed version matches the workflow tool version
-                # (If it's not installed, the show_tool version returned will be a default version with the suffix "XXXX+0")
-                if show_tool["version"] != v["tool_version"]:
-                    # If it doesn't match, proceed to install of the correct changeset revision
-                    toolshed = "https://" + v["tool_shed_repository"]["tool_shed"]
-                    name = v["tool_shed_repository"]["name"]
-                    owner = v["tool_shed_repository"]["owner"]
-                    changeset_revision = v["tool_shed_repository"]["changeset_revision"]
-                    
-                    logging.warning("Installed tool versions for tool {0} do not match the version required by the specified workflow, installing changeset {1}".format(name, changeset_revision))
-
-                    # Install changeset
-                    instance.toolshed.install_repository_revision(tool_shed_url=toolshed, name=name, owner=owner, 
-                                                                       changeset_revision=changeset_revision,
-                                                                       install_tool_dependencies=True,
-                                                                       install_repository_dependencies=False,
-                                                                       install_resolver_dependencies=True)
-                else:
-                    toolshed = "https://" + v["tool_shed_repository"]["tool_shed"]
-                    name = v["tool_shed_repository"]["name"]
-                    owner = v["tool_shed_repository"]["owner"]
-                    changeset_revision = v["tool_shed_repository"]["changeset_revision"]
-                    logging.debug("Installed tool versions for tool {0} match the version in the specified workflow (changeset {1})".format(name, changeset_revision))
-
-    logging.info("Tools versions and changesets from workflow validated")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run Galaxy workflows, specific to Phaeoexplorer data")
-
-    parser.add_argument("input",
-                        type=str,
-                        help="Input file (yml)")
-
-    parser.add_argument("-v", "--verbose",
-                        help="Increase output verbosity",
-                        action="store_true")
-
-    parser.add_argument("--config",
-                        type=str,
-                        help="Config path, default to the 'config' file inside the script repository")
-
-    parser.add_argument("--main-directory",
-                        type=str,
-                        help="Where the stack containers will be located, defaults to working directory")
-
-    parser.add_argument("--workflow", "-w",
-                        type=str,
-                        help="Workflow to run. Available options: load_fasta_gff_jbrowse, blast, interpro")
-
-    args = parser.parse_args()
-
-    bioblend_logger = logging.getLogger("bioblend")
-    if args.verbose:
-        logging.basicConfig(level=logging.DEBUG)
-        bioblend_logger.setLevel(logging.DEBUG)
-    else:
-        logging.basicConfig(level=logging.INFO)
-        bioblend_logger.setLevel(logging.INFO)
-
-    # Parse the config file if provided, use the default config otherwise
-    if args.config:
-        config_file = os.path.abspath(args.config)
-    else:
-        config_file = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), constants.DEFAULT_CONFIG)
-    config = utilities.parse_config(config_file)
-
-    if not args.main_directory:
-        args.main_directory = os.getcwd()
-    else:
-        args.main_directory = os.path.abspath(args.main_directory)
-
-    sp_dict_list = utilities.parse_input(args.input)
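-    # One dictionary per organism entry (genus/species/strain/sex) described in the input yaml file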
-
-    workflow_valid_types = ["load_fasta_gff_jbrowse", "blast", "interpro"]
-
-    workflow_type = None
-
-    # Check that the user specified a valid workflow to run
-    if not args.workflow:
-        logging.critical("No workflow type specified, exiting")
-        sys.exit()
-    elif args.workflow in workflow_valid_types:
-        workflow_type = args.workflow
-    else:
-        logging.critical("Invalid workflow type specified (%s), exiting" % args.workflow)
-        sys.exit()
-    logging.info("Workflow type set to %s" % workflow_type)
-
-    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
-    config = utilities.parse_config(args.config)
-    all_sp_workflow_dict = {}
-
-
-    if workflow_type == "load_fasta_gff_jbrowse":
-        for sp_dict in sp_dict_list:
-
-            # Add and retrieve all analyses/organisms for the current input species and add their IDs to the input dictionary
-            current_sp_workflow_dict = create_sp_workflow_dict(sp_dict, main_dir=args.main_directory, config=config, workflow_type="load_fasta_gff_jbrowse")
-
-            current_sp_key = list(current_sp_workflow_dict.keys())[0]
-            current_sp_value = list(current_sp_workflow_dict.values())[0]
-            current_sp_strain_sex_key = list(current_sp_value.keys())[0]
-            current_sp_strain_sex_value = list(current_sp_value.values())[0]
-
-            # Add the species dictionary to the complete dictionary
-            # This dictionary contains every organism present in the input file
-            # Its structure is the following:
-            # {genus species: {strain1_sex1: {variables_key: variables_values}, strain1_sex2: {variables_key: variables_values}}}
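-            # Illustrative example (keys and values hypothetical):
-            # {"genus_species": {"strain1_male": {"org_id": ..., "history_id": ..., "hda_ids": {...}}, "strain1_female": {...}}}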
-            if not current_sp_key in all_sp_workflow_dict.keys():
-                all_sp_workflow_dict[current_sp_key] = current_sp_value
-            else:
-                all_sp_workflow_dict[current_sp_key][current_sp_strain_sex_key] = current_sp_strain_sex_value
-
-        for k, v in all_sp_workflow_dict.items():
-            if len(list(v.keys())) == 1:
-                logging.info("Input organism %s: 1 strain/sex combination detected in input dictionary" % k)
-
-                # Set workflow path (1 organism)
-                workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_1org_v4.ga")
-
-                # Instance object required variables
-                instance_url, email, password = None, None, None
-
-                # Set the galaxy instance variables
-                for k2, v2 in v.items():
-                    instance_url = v2["instance_url"]
-                    email = v2["email"]
-                    password = v2["password"]
-
-                instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
-
-                # Check if the versions of tools specified in the workflow are installed in galaxy
-                install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
-
-                organisms_key_names = list(v.keys())
-                org_dict = v[organisms_key_names[0]]
-
-                history_id = org_dict["history_id"]
-
-                # Organism 1 attributes
-                org_genus = org_dict["genus"]
-                org_species = org_dict["species"]
-                org_genus_species = org_dict["genus_species"]
-                org_species_folder_name = org_dict["species_folder_name"]
-                org_full_name = org_dict["full_name"]
-                org_strain = org_dict["strain"]
-                org_sex = org_dict["sex"]
-                org_org_id = org_dict["org_id"]
-                org_genome_analysis_id = org_dict["genome_analysis_id"]
-                org_ogs_analysis_id = org_dict["ogs_analysis_id"]
-                org_genome_hda_id = org_dict["hda_ids"]["genome_hda_id"]
-                org_transcripts_hda_id = org_dict["hda_ids"]["transcripts_hda_id"]
-                org_proteins_hda_id = org_dict["hda_ids"]["proteins_hda_id"]
-                org_gff_hda_id = org_dict["hda_ids"]["gff_hda_id"]
-
-                # Store these values into a dict for parameters logging/validation
-                org_parameters_dict = {
-                    "org_genus": org_genus,
-                    "org_species": org_species,
-                    "org_genus_species": org_genus_species,
-                    "org_species_folder_name": org_species_folder_name,
-                    "org_full_name": org_full_name,
-                    "org_strain": org_strain,
-                    "org_sex": org_sex,
-                    "org_org_id": org_org_id,
-                    "org_genome_analysis_id": org_genome_analysis_id,
-                    "org_ogs_analysis_id": org_ogs_analysis_id,
-                    "org_genome_hda_id": org_genome_hda_id,
-                    "org_transcripts_hda_id": org_transcripts_hda_id,
-                    "org_proteins_hda_id": org_proteins_hda_id,
-                    "org_gff_hda_id": org_gff_hda_id,
-                }
-
-                # Look for empty parameters values, throw a critical error if a parameter value is invalid
-                for param_name, param_value in org_parameters_dict.items():
-                    if param_value is None or param_value == "":
-                        logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org_full_name, param_name, param_value))
-                        sys.exit()
-
-                # Set the workflow parameters (individual tools runtime parameters in the workflow)
-                workflow_parameters = {}
-
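-                # Source files and tools association (ordered by their step IDs in the workflow)
-                # WARNING: Be very careful about how the workflow is "organized" (i.e the order of the steps/datasets, check the .ga if there is any error)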
-                GENOME_FASTA_FILE_ORG = "0"
-                GFF_FILE_ORG = "1"
-                PROTEINS_FASTA_FILE_ORG = "2"
-                LOAD_FASTA_ORG = "3"
-                JBROWSE_ORG = "4"
-                LOAD_GFF_ORG = "5"
-                JBROWSE_CONTAINER = "6"
-                SYNC_FEATURES_ORG = "7"
-                POPULATE_MAT_VIEWS = "8"
-                INDEX_TRIPAL_DATA = "9"
-
-                # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
-                workflow_parameters[GENOME_FASTA_FILE_ORG] = {}
-                workflow_parameters[GFF_FILE_ORG] = {}
-                workflow_parameters[PROTEINS_FASTA_FILE_ORG] = {}
-                workflow_parameters[LOAD_FASTA_ORG] = {"organism": org_org_id,
-                                                        "analysis_id": org_genome_analysis_id,
-                                                        "do_update": "true"}
-                workflow_parameters[JBROWSE_ORG] = {}
-                workflow_parameters[LOAD_GFF_ORG] = {"organism": org_org_id, "analysis_id": org_ogs_analysis_id}
-                workflow_parameters[SYNC_FEATURES_ORG] = {"organism_id":  org_org_id}
-                # POPULATE + INDEX DATA
-                workflow_parameters[POPULATE_MAT_VIEWS] = {}
-                workflow_parameters[INDEX_TRIPAL_DATA] = {}
-
-                # Set datamap (mapping of input files in the workflow)
-                datamap = {}
-
-                datamap[GENOME_FASTA_FILE_ORG] = {"src": "hda", "id": org_genome_hda_id}
-                datamap[GFF_FILE_ORG] = {"src": "hda", "id": org_gff_hda_id}
-                datamap[PROTEINS_FASTA_FILE_ORG] = {"src": "hda", "id": org_proteins_hda_id}
-
-
-                with open(workflow_path, 'r') as ga_in_file:
-
-                    # Store the decoded json dictionary
-                    workflow_dict = json.load(ga_in_file)
-                    workflow_name = workflow_dict["name"]
-
-                    # For the Jbrowse tool we have to edit the tool_state manually instead of passing runtime parameters,
-                    # because using runtime parameters makes the tool throw an internal critical error ("replace not found" error)
-                    # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
-                    if "jbrowse_menu_url" not in config.keys():
-                        jbrowse_menu_url_org = "https://{hostname}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(hostname=config["hostname"], genus_sp=org_genus_species, Genus=org_genus[0].upper() + org_genus[1:], species=org_species, id="{id}")
-                    else:
-                        jbrowse_menu_url_org = config["jbrowse_menu_url"] + "/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(genus_sp=org_genus_species, Genus=org_genus[0].upper() + org_genus[1:], species=org_species, id="{id}")
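-                    # e.g. (illustrative, assuming hostname "gga.example.org" and organism "Ectocarpus siliculosus"):
-                    # https://gga.example.org/sp/ectocarpus_siliculosus/feature/Ectocarpus/siliculosus/mRNA/{id}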
-
-                    # Replace values in the workflow dictionary
-                    workflow_dict["steps"]["4"]["tool_state"] = workflow_dict["steps"]["4"]["tool_state"].replace("__MENU_URL_ORG__", jbrowse_menu_url_org)
-                    workflow_dict["steps"]["6"]["tool_state"] = workflow_dict["steps"]["6"]["tool_state"].replace("__DISPLAY_NAME_ORG__", org_full_name).replace("__UNIQUE_ID_ORG__", org_species_folder_name)
-
-                    # Import the workflow in galaxy as a dict
-                    instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-
-                    # Get its attributes
-                    workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
-                    # Then get its ID (required to invoke the workflow)
-                    workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-                    # Check that the imported workflow can be retrieved from the instance
-                    try:
-                        instance.workflows.show_workflow(workflow_id=workflow_id)
-                        logging.debug("Workflow ID: %s" % workflow_id)
-                    except bioblend.ConnectionError:
-                        logging.warning("Error finding workflow %s" % workflow_name)
-
-                    # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-                    instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
-
-                    logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
-
-
-            if len(list(v.keys())) == 2:
-
-                logging.info("Input organism %s: 2 strain/sex combinations detected in input dictionary" % k)
-
-                # Set workflow path (2 organisms)
-                workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_2org_v4.ga")
-
-                # Instance object required variables
-                instance_url, email, password = None, None, None
-
-                # Set the galaxy instance variables
-                for k2, v2 in v.items():
-                    instance_url = v2["instance_url"]
-                    email = v2["email"]
-                    password = v2["password"]
-
-                instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
-
-                # Check if the versions of tools specified in the workflow are installed in galaxy
-                install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
-
-                # Get key names from the current organism (item 1 = organism 1, item 2 = organism 2)
-                organisms_key_names = list(v.keys())
-                org1_dict = v[organisms_key_names[0]]
-                org2_dict = v[organisms_key_names[1]]
-
-                history_id = org1_dict["history_id"]
-
-                # Organism 1 attributes
-                org1_genus = org1_dict["genus"]
-                org1_species = org1_dict["species"]
-                org1_genus_species = org1_dict["genus_species"]
-                org1_species_folder_name = org1_dict["species_folder_name"]
-                org1_full_name = org1_dict["full_name"]
-                org1_strain = org1_dict["strain"]
-                org1_sex = org1_dict["sex"]
-                org1_org_id = org1_dict["org_id"]
-                org1_genome_analysis_id = org1_dict["genome_analysis_id"]
-                org1_ogs_analysis_id = org1_dict["ogs_analysis_id"]
-                org1_genome_hda_id = org1_dict["hda_ids"]["genome_hda_id"]
-                org1_transcripts_hda_id = org1_dict["hda_ids"]["transcripts_hda_id"]
-                org1_proteins_hda_id = org1_dict["hda_ids"]["proteins_hda_id"]
-                org1_gff_hda_id = org1_dict["hda_ids"]["gff_hda_id"]
-
-                # Store these values into a dict for parameters logging/validation
-                org1_parameters_dict = {
-                    "org1_genus": org1_genus,
-                    "org1_species": org1_species,
-                    "org1_genus_species": org1_genus_species,
-                    "org1_species_folder_name": org1_species_folder_name,
-                    "org1_full_name": org1_full_name,
-                    "org1_strain": org1_strain,
-                    "org1_sex": org1_sex,
-                    "org1_org_id": org1_org_id,
-                    "org1_genome_analysis_id": org1_genome_analysis_id,
-                    "org1_ogs_analysis_id": org1_ogs_analysis_id,
-                    "org1_genome_hda_id": org1_genome_hda_id,
-                    "org1_transcripts_hda_id": org1_transcripts_hda_id,
-                    "org1_proteins_hda_id": org1_proteins_hda_id,
-                    "org1_gff_hda_id": org1_gff_hda_id,
-                }
-
-                # Look for empty parameters values, throw a critical error if a parameter value is invalid
-                for param_name, param_value in org1_parameters_dict.items():
-                    if param_value is None or param_value == "":
-                        logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org1_full_name, param_name, param_value))
-                        sys.exit()
-
-                # Organism 2 attributes
-                org2_genus = org2_dict["genus"]
-                org2_species = org2_dict["species"]
-                org2_genus_species = org2_dict["genus_species"]
-                org2_species_folder_name = org2_dict["species_folder_name"]
-                org2_full_name = org2_dict["full_name"]
-                org2_strain = org2_dict["strain"]
-                org2_sex = org2_dict["sex"]
-                org2_org_id = org2_dict["org_id"]
-                org2_genome_analysis_id = org2_dict["genome_analysis_id"]
-                org2_ogs_analysis_id = org2_dict["ogs_analysis_id"]
-                org2_genome_hda_id = org2_dict["hda_ids"]["genome_hda_id"]
-                org2_transcripts_hda_id = org2_dict["hda_ids"]["transcripts_hda_id"]
-                org2_proteins_hda_id = org2_dict["hda_ids"]["proteins_hda_id"]
-                org2_gff_hda_id = org2_dict["hda_ids"]["gff_hda_id"]
-
-                # Store these values into a dict for parameters logging/validation
-                org2_parameters_dict = {
-                    "org2_genus": org2_genus,
-                    "org2_species": org2_species,
-                    "org2_genus_species": org2_genus_species,
-                    "org2_species_folder_name": org2_species_folder_name,
-                    "org2_full_name": org2_full_name,
-                    "org2_strain": org2_strain,
-                    "org2_sex": org2_sex,
-                    "org2_org_id": org2_org_id,
-                    "org2_genome_analysis_id": org2_genome_analysis_id,
-                    "org2_ogs_analysis_id": org2_ogs_analysis_id,
-                    "org2_genome_hda_id": org2_genome_hda_id,
-                    "org2_transcripts_hda_id": org2_transcripts_hda_id,
-                    "org2_proteins_hda_id": org2_proteins_hda_id,
-                    "org2_gff_hda_id": org2_gff_hda_id,
-                }
-
-                # Look for empty parameters values, throw a critical error if a parameter value is invalid
-                for param_name, param_value in org2_parameters_dict.items():
-                    if param_value is None or param_value == "":
-                        logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org2_full_name, param_name, param_value))
-                        sys.exit()
-
-                # Source files association (ordered by their IDs in the workflow)
-                # WARNING: Be very careful about how the workflow is "organized" (i.e the order of the steps/datasets, check the .ga if there is any error)
-                GFF_FILE_ORG1 = "0"
-                GENOME_FASTA_FILE_ORG1 = "1"
-                PROTEINS_FASTA_FILE_ORG1 = "2"
-
-                GENOME_FASTA_FILE_ORG2 = "3"
-                GFF_FILE_ORG2 = "4"
-                PROTEINS_FASTA_FILE_ORG2 = "5"
-
-                LOAD_FASTA_ORG1 = "6"
-                JBROWSE_ORG1 = "7"
-                JBROWSE_ORG2 = "8"
-
-                LOAD_GFF_ORG1 = "9"
-                JBROWSE_CONTAINER = "10"
-                SYNC_FEATURES_ORG1 = "11"
-
-                LOAD_FASTA_ORG2 = "12"
-                LOAD_GFF_ORG2 = "13"
-
-                SYNC_FEATURES_ORG2 = "14"
-                POPULATE_MAT_VIEWS = "15"
-                INDEX_TRIPAL_DATA = "16"
-
-                # Set the workflow parameters (individual tools runtime parameters in the workflow)
-                workflow_parameters = {}
-
-                # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
-                workflow_parameters[GENOME_FASTA_FILE_ORG1] = {}
-                workflow_parameters[GFF_FILE_ORG1] = {}
-                workflow_parameters[PROTEINS_FASTA_FILE_ORG1] = {}
-                workflow_parameters[GENOME_FASTA_FILE_ORG2] = {}
-                workflow_parameters[GFF_FILE_ORG2] = {}
-                workflow_parameters[PROTEINS_FASTA_FILE_ORG2] = {}
-
-                # Organism 1
-                workflow_parameters[LOAD_FASTA_ORG1] = {"organism": org1_org_id,
-                                                        "analysis_id": org1_genome_analysis_id,
-                                                        "do_update": "true"}
-                # workflow_parameters[JBROWSE_ORG1] = {"jbrowse_menu_url": jbrowse_menu_url_org1}
-                workflow_parameters[JBROWSE_ORG1] = {}
-                workflow_parameters[LOAD_GFF_ORG1] = {"organism": org1_org_id, "analysis_id": org1_ogs_analysis_id}
-                workflow_parameters[SYNC_FEATURES_ORG1] = {"organism_id":  org1_org_id}
-                # workflow_parameters[JBROWSE_CONTAINER] = {"organisms": [{"name": org1_full_name, "unique_id": org1_species_folder_name, }, {"name": org2_full_name, "unique_id": org2_species_folder_name}]}
-                workflow_parameters[JBROWSE_CONTAINER] = {}
-
-                # Organism 2
-                workflow_parameters[LOAD_FASTA_ORG2] = {"organism": org2_org_id,
-                                                        "analysis_id": org2_genome_analysis_id,
-                                                        "do_update": "true"}
-                workflow_parameters[LOAD_GFF_ORG2] = {"organism": org2_org_id, "analysis_id": org2_ogs_analysis_id}
-                # workflow_parameters[JBROWSE_ORG2] = {"jbrowse_menu_url": jbrowse_menu_url_org2}
-                workflow_parameters[JBROWSE_ORG2] = {}
-                workflow_parameters[SYNC_FEATURES_ORG2] = {"organism_id":  org2_org_id}
-
-
-                # POPULATE + INDEX DATA
-                workflow_parameters[POPULATE_MAT_VIEWS] = {}
-                workflow_parameters[INDEX_TRIPAL_DATA] = {}
-
-
-                # Set datamap (mapping of input files in the workflow)
-                datamap = {}
-
-                # Organism 1
-                datamap[GENOME_FASTA_FILE_ORG1] = {"src": "hda", "id": org1_genome_hda_id}
-                datamap[GFF_FILE_ORG1] = {"src": "hda", "id": org1_gff_hda_id}
-                datamap[PROTEINS_FASTA_FILE_ORG1] = {"src": "hda", "id": org1_proteins_hda_id}
-
-                # Organism 2
-                datamap[GENOME_FASTA_FILE_ORG2] = {"src": "hda", "id": org2_genome_hda_id}
-                datamap[GFF_FILE_ORG2] = {"src": "hda", "id": org2_gff_hda_id}
-                datamap[PROTEINS_FASTA_FILE_ORG2] = {"src": "hda", "id": org2_proteins_hda_id}
-
-                with open(workflow_path, 'r') as ga_in_file:
-
-                    # Store the decoded json dictionary
-                    workflow_dict = json.load(ga_in_file)
-                    workflow_name = workflow_dict["name"]
-
-                    # For the Jbrowse tool we have to edit the tool_state manually instead of passing runtime parameters,
-                    # because using runtime parameters makes the tool throw an internal critical error ("replace not found" error)
-                    # Scratchgmod test: need "http" (or "https"), the hostname (+ port)
-                    if "jbrowse_menu_url" not in config.keys():
-                        jbrowse_menu_url_org1 = "https://{hostname}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(hostname=config["hostname"], genus_sp=org1_genus_species, Genus=org1_genus[0].upper() + org1_genus[1:], species=org1_species, id="{id}")
-                        jbrowse_menu_url_org2 = "https://{hostname}/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(hostname=config["hostname"], genus_sp=org2_genus_species, Genus=org2_genus[0].upper() + org2_genus[1:], species=org2_species, id="{id}")
-                    else:
-                        jbrowse_menu_url_org1 = config["jbrowse_menu_url"] + "/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(genus_sp=org1_genus_species, Genus=org1_genus[0].upper() + org1_genus[1:], species=org1_species, id="{id}")
-                        jbrowse_menu_url_org2 = config["jbrowse_menu_url"] + "/sp/{genus_sp}/feature/{Genus}/{species}/mRNA/{id}".format(genus_sp=org2_genus_species, Genus=org2_genus[0].upper() + org2_genus[1:], species=org2_species, id="{id}")
-
-                    # show_tool_add_organism = instance.tools.show_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/chado_organism_add_organism/organism_add_organism/2.3.4+galaxy0", io_details=True)
-                    # print(show_tool_add_organism)
-                    # show_jbrowse_tool = instance.tools.show_tool(tool_id="toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0", io_details=True)
-                    # print(show_jbrowse_tool)
-                    # show_jbrowse_container_tool = instance.tools.show_tool(tool_id="toolshed.g2.bx.psu.edu/repos/gga/jbrowse_to_container/jbrowse_to_container/0.5.1", io_details=True)
-                    # print(show_jbrowse_container_tool)
-
-                    # Replace values in the workflow dictionary
-                    workflow_dict["steps"]["7"]["tool_state"] = workflow_dict["steps"]["7"]["tool_state"].replace("__MENU_URL_ORG1__", jbrowse_menu_url_org1)
-                    workflow_dict["steps"]["8"]["tool_state"] = workflow_dict["steps"]["8"]["tool_state"].replace("__MENU_URL_ORG2__", jbrowse_menu_url_org2)
-                    # The UNIQUE_ID is specific to a combination genus_species_strain_sex so every combination should have its unique workflow
-                    # in galaxy --> define a naming method for these workflows
-                    workflow_dict["steps"]["10"]["tool_state"] = workflow_dict["steps"]["10"]["tool_state"].replace("__DISPLAY_NAME_ORG1__", org1_full_name).replace("__UNIQUE_ID_ORG1__", org1_species_folder_name)
-                    workflow_dict["steps"]["10"]["tool_state"] = workflow_dict["steps"]["10"]["tool_state"].replace("__DISPLAY_NAME_ORG2__", org2_full_name).replace("__UNIQUE_ID_ORG2__", org2_species_folder_name)
-
-                    # Import the workflow in galaxy as a dict
-                    instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-
-                    # Get its attributes
-                    workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
-                    # Then get its ID (required to invoke the workflow)
-                    workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-                    # Check that the imported workflow can be retrieved from the instance
-                    try:
-                        instance.workflows.show_workflow(workflow_id=workflow_id)
-                        logging.debug("Workflow ID: %s" % workflow_id)
-                    except bioblend.ConnectionError:
-                        logging.warning("Error finding workflow %s" % workflow_name)
-
-                    # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-                    instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
-
-                    logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
-
-    if workflow_type == "blast":
-        for sp_dict in sp_dict_list:
-
-            # Add and retrieve all analyses/organisms for the current input species and add their IDs to the input dictionary
-            current_sp_workflow_dict = create_sp_workflow_dict(sp_dict, main_dir=args.main_directory, config=config, workflow_type="blast")
-
-            current_sp_key = list(current_sp_workflow_dict.keys())[0]
-            current_sp_value = list(current_sp_workflow_dict.values())[0]
-            current_sp_strain_sex_key = list(current_sp_value.keys())[0]
-            current_sp_strain_sex_value = list(current_sp_value.values())[0]
-
-            # Add the species dictionary to the complete dictionary
-            # This dictionary contains every organism present in the input file
-            # Its structure is the following:
-            # {genus species: {strain1_sex1: {variables_key: variables_values}, strain1_sex2: {variables_key: variables_values}}}
-            if not current_sp_key in all_sp_workflow_dict.keys():
-                all_sp_workflow_dict[current_sp_key] = current_sp_value
-            else:
-                all_sp_workflow_dict[current_sp_key][current_sp_strain_sex_key] = current_sp_strain_sex_value
-
-        if len(list(v.keys())) == 1:
-            logging.info("Input organism %s: 1 strain/sex combination detected in input dictionary" % k)
-
-            # Set workflow path (1 organism)
-            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_1org_v1.ga")
-
-            # Instance object required variables
-            instance_url, email, password = None, None, None
-
-            # Set the galaxy instance variables
-            for k2, v2 in v.items():
-                instance_url = v2["instance_url"]
-                email = v2["email"]
-                password = v2["password"]
-
-            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
-
-            # Check if the versions of tools specified in the workflow are installed in galaxy
-            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
-
-            organisms_key_names = list(v.keys())
-            org_dict = v[organisms_key_names[0]]
-
-            history_id = org_dict["history_id"]
-
-            # Organism attributes
-            org_genus = org_dict["genus"]
-            org_species = org_dict["species"]
-            org_genus_species = org_dict["genus_species"]
-            org_species_folder_name = org_dict["species_folder_name"]
-            org_full_name = org_dict["full_name"]
-            org_strain = org_dict["strain"]
-            org_sex = org_dict["sex"]
-            org_org_id = org_dict["org_id"]
-            org_blastp_analysis_id = org_dict["blastp_analysis_id"]
-            org_blastp_hda_id = org_dict["hda_ids"]["blastp_hda_id"]
-
-            # Store these values into a dict for parameters logging/validation
-            org_parameters_dict = {
-                "org_genus": org_genus,
-                "org_species": org_species,
-                "org_genus_species": org_genus_species,
-                "org_species_folder_name": org_species_folder_name,
-                "org_full_name": org_full_name,
-                "org_strain": org_strain,
-                "org_sex": org_sex,
-                "org_org_id": org_org_id,
-                "org_blast_analysis_id": org_blastp_analysis_id,
-                "org_blastp_hda_id": org_blastp_hda_id,
-            }
-
-            # Look for empty parameters values, throw a critical error if a parameter value is invalid
-            for param_name, param_value in org_parameters_dict.items():
-                if param_value is None or param_value == "":
-                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org_full_name, param_name, param_value))
-                    sys.exit()
-
-            BLASTP_FILE = "0"
-            LOAD_BLASTP_FILE = "1"
-            POPULATE_MAT_VIEWS = "2"
-            INDEX_TRIPAL_DATA = "3"
-
-            # Set the workflow parameters (individual tools runtime parameters in the workflow)
-            workflow_parameters = {}
-            workflow_parameters[BLASTP_FILE] = {}
-            workflow_parameters[LOAD_BLASTP_FILE] = {"analysis_id": org_blastp_analysis_id, "organism_id": org_org_id}
-            workflow_parameters[POPULATE_MAT_VIEWS] = {}
-            workflow_parameters[INDEX_TRIPAL_DATA] = {}
-
-            datamap = {}
-            datamap[BLASTP_FILE] = {"src": "hda", "id": org_blastp_hda_id}
-
-            with open(workflow_path, 'r') as ga_in_file:
-                # Store the decoded json dictionary
-                workflow_dict = json.load(ga_in_file)
-                workflow_name = workflow_dict["name"]
-
-                # Import the workflow in galaxy as a dict
-                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-                # Get its attributes
-                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
-                # Then get its ID (required to invoke the workflow)
-                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-                # Check that the imported workflow can be retrieved from the instance
-                try:
-                    instance.workflows.show_workflow(workflow_id=workflow_id)
-                    logging.debug("Workflow ID: %s" % workflow_id)
-                except bioblend.ConnectionError:
-                    logging.warning("Error finding workflow %s" % workflow_name)
-
-                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
-
-                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
-
-
-
-        if len(list(v.keys())) == 2:
-
-            logging.info("Input organism %s: 2 strain/sex combinations detected in input dictionary" % k)
-
-            # Set workflow path (2 organisms)
-            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_2org_v1.ga")
-
-            # Instance object required variables
-            instance_url, email, password = None, None, None
-
-            # Set the galaxy instance variables
-            for k2, v2 in v.items():
-                instance_url = v2["instance_url"]
-                email = v2["email"]
-                password = v2["password"]
-
-            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
-
-            # Check if the versions of tools specified in the workflow are installed in galaxy
-            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
-
-            organisms_key_names = list(v.keys())
-            org1_dict = v[organisms_key_names[0]]
-            org2_dict = v[organisms_key_names[1]]
-
-            history_id = org1_dict["history_id"]
-
-            # Organism 1 attributes
-            org1_genus = org1_dict["genus"]
-            org1_species = org1_dict["species"]
-            org1_genus_species = org1_dict["genus_species"]
-            org1_species_folder_name = org1_dict["species_folder_name"]
-            org1_full_name = org1_dict["full_name"]
-            org1_strain = org1_dict["strain"]
-            org1_sex = org1_dict["sex"]
-            org1_org_id = org1_dict["org_id"]
-            org1_blastp_analysis_id = org1_dict["blastp_analysis_id"]
-            org1_blastp_hda_id = org1_dict["hda_ids"]["blastp_hda_id"]
-
-            # Store these values into a dict for parameters logging/validation
-            org1_parameters_dict = {
-                "org1_genus": org1_genus,
-                "org1_species": org1_species,
-                "org1_genus_species": org1_genus_species,
-                "org1_species_folder_name": org1_species_folder_name,
-                "org1_full_name": org1_full_name,
-                "org1_strain": org1_strain,
-                "org1_sex": org1_sex,
-                "org1_org_id": org1_org_id,
-                "org1_blast_analysis_id": org1_blastp_analysis_id,
-                "org1_blastp_hda_id": org1_blastp_hda_id,
-            }
-
-
-            # Look for empty parameters values, throw a critical error if a parameter value is invalid
-            for param_name, param_value in org1_parameters_dict.items():
-                if param_value is None or param_value == "":
-                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org1_full_name, param_name, param_value))
-                    sys.exit()
-
-            # Organism 2 attributes
-            org2_genus = org2_dict["genus"]
-            org2_species = org2_dict["species"]
-            org2_genus_species = org2_dict["genus_species"]
-            org2_species_folder_name = org2_dict["species_folder_name"]
-            org2_full_name = org2_dict["full_name"]
-            org2_strain = org2_dict["strain"]
-            org2_sex = org2_dict["sex"]
-            org2_org_id = org2_dict["org_id"]
-            org2_blastp_analysis_id = org2_dict["blastp_analysis_id"]
-            org2_blastp_hda_id = org2_dict["hda_ids"]["blastp_hda_id"]
-
-            # Store these values into a dict for parameters logging/validation
-            org2_parameters_dict = {
-                "org2_genus": org2_genus,
-                "org2_species": org2_species,
-                "org2_genus_species": org2_genus_species,
-                "org2_species_folder_name": org2_species_folder_name,
-                "org2_full_name": org2_full_name,
-                "org2_strain": org2_strain,
-                "org2_sex": org2_sex,
-                "org2_org_id": org2_org_id,
-                "org2_blast_analysis_id": org2_blastp_analysis_id,
-                "org2_blastp_hda_id": org2_blastp_hda_id,
-            }
-
-
-            # Look for empty parameters values, throw a critical error if a parameter value is invalid
-            for param_name, param_value in org2_parameters_dict.items():
-                if param_value is None or param_value == "":
-                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org2_full_name, param_name, param_value))
-                    sys.exit()
-
-            # Source files association (ordered by their IDs in the workflow)
-            # WARNING: Be very careful about how the workflow is "organized" (i.e the order of the steps/datasets, check the .ga if there is any error)
-            BLASTP_FILE_ORG1 = "0"
-            BLASTP_FILE_ORG2 = "1"
-            LOAD_BLASTP_FILE_ORG1 = "2"
-            LOAD_BLASTP_FILE_ORG2 = "3"
-            POPULATE_MAT_VIEWS = "4"
-            INDEX_TRIPAL_DATA = "5"
-
-            # Set the workflow parameters (individual tools runtime parameters in the workflow)
-            workflow_parameters = {}
-
-            # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
-            workflow_parameters[BLASTP_FILE_ORG1] = {}
-            workflow_parameters[BLASTP_FILE_ORG2] = {}
-
-            # Organism 1
-            workflow_parameters[LOAD_BLASTP_FILE_ORG1] = {"organism_id": org1_org_id,
-                                                          "analysis_id": org1_blastp_analysis_id}
-
-            # Organism 2
-            workflow_parameters[LOAD_BLASTP_FILE_ORG2] = {"organism_id": org2_org_id,
-                                                          "analysis_id": org2_blastp_analysis_id}
-
-            workflow_parameters[POPULATE_MAT_VIEWS] = {}
-            workflow_parameters[INDEX_TRIPAL_DATA] = {}
-
-            # Set datamap (mapping of input files in the workflow)
-            datamap = {}
-
-            # Organism 1
-            datamap[BLASTP_FILE_ORG1] = {"src": "hda", "id": org1_blastp_hda_id}
-
-            # Organism 2
-            datamap[BLASTP_FILE_ORG2] = {"src": "hda", "id": org2_blastp_hda_id}
-
-            with open(workflow_path, 'r') as ga_in_file:
-                # Store the decoded json dictionary
-                workflow_dict = json.load(ga_in_file)
-                workflow_name = workflow_dict["name"]
-
-                # Import the workflow in galaxy as a dict
-                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-                # Get its attributes
-                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
-                # Then get its ID (required to invoke the workflow)
-                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-                # Check that the imported workflow can be retrieved from the instance
-                try:
-                    instance.workflows.show_workflow(workflow_id=workflow_id)
-                    logging.debug("Workflow ID: %s" % workflow_id)
-                except bioblend.ConnectionError:
-                    logging.warning("Error finding workflow %s" % workflow_name)
-
-                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
-
-                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
-
-
-    if workflow_type == "interpro":
-        for sp_dict in sp_dict_list:
-
-            # Add and retrieve all analyses/organisms for the current input species and add their IDs to the input dictionary
-            current_sp_workflow_dict = create_sp_workflow_dict(sp_dict, main_dir=args.main_directory, config=config, workflow_type="interpro")
-
-            current_sp_key = list(current_sp_workflow_dict.keys())[0]
-            current_sp_value = list(current_sp_workflow_dict.values())[0]
-            current_sp_strain_sex_key = list(current_sp_value.keys())[0]
-            current_sp_strain_sex_value = list(current_sp_value.values())[0]
-
-            # Add the species dictionary to the complete dictionary
-            # This dictionary contains every organism present in the input file
-            # Its structure is the following:
-            # {genus species: {strain1_sex1: {variables_key: variables_values}, strain1_sex2: {variables_key: variables_values}}}
-            if not current_sp_key in all_sp_workflow_dict.keys():
-                all_sp_workflow_dict[current_sp_key] = current_sp_value
-            else:
-                all_sp_workflow_dict[current_sp_key][current_sp_strain_sex_key] = current_sp_strain_sex_value
-
-        if len(list(v.keys())) == 1:
-            logging.info("Input organism %s: 1 strain/sex combination detected in input dictionary" % k)
-
-            # Set workflow path (1 organism)
-            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_1org_v1.ga")
-
-            # Instance object required variables
-            instance_url, email, password = None, None, None
-
-            # Set the galaxy instance variables
-            for k2, v2 in v.items():
-                instance_url = v2["instance_url"]
-                email = v2["email"]
-                password = v2["password"]
-
-            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
-
-            # Check if the versions of tools specified in the workflow are installed in galaxy
-            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
-
-            organisms_key_names = list(v.keys())
-            org_dict = v[organisms_key_names[0]]
-
-            history_id = org_dict["history_id"]
-
-            # Organism attributes
-            org_genus = org_dict["genus"]
-            org_species = org_dict["species"]
-            org_genus_species = org_dict["genus_species"]
-            org_species_folder_name = org_dict["species_folder_name"]
-            org_full_name = org_dict["full_name"]
-            org_strain = org_dict["strain"]
-            org_sex = org_dict["sex"]
-            org_org_id = org_dict["org_id"]
-            org_interproscan_analysis_id = org_dict["interproscan_analysis_id"]
-            org_interproscan_hda_id = org_dict["hda_ids"]["interproscan_hda_id"]
-
-            # Store these values into a dict for parameters logging/validation
-            org_parameters_dict = {
-                "org_genus": org_genus,
-                "org_species": org_species,
-                "org_genus_species": org_genus_species,
-                "org_species_folder_name": org_species_folder_name,
-                "org_full_name": org_full_name,
-                "org_strain": org_strain,
-                "org_sex": org_sex,
-                "org_org_id": org_org_id,
-                "org_interproscan_analysis_id": org_interproscan_analysis_id,
-                "org_interproscan_hda_id": org_interproscan_hda_id,
-            }
-
-            # Look for empty parameters values, throw a critical error if a parameter value is invalid
-            for param_name, param_value in org_parameters_dict.items():
-                if param_value is None or param_value == "":
-                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org_full_name, param_name, param_value))
-                    sys.exit()
-
-            INTERPRO_FILE = "0"
-            LOAD_INTERPRO_FILE = "1"
-            POPULATE_MAT_VIEWS = "2"
-            INDEX_TRIPAL_DATA = "3"
-
-            # Set the workflow parameters (individual tools runtime parameters in the workflow)
-            workflow_parameters = {}
-            workflow_parameters[INTERPRO_FILE] = {}
-            workflow_parameters[LOAD_INTERPRO_FILE] = {"analysis_id": org_interproscan_analysis_id, "organism_id": org_org_id}
-            workflow_parameters[POPULATE_MAT_VIEWS] = {}
-            workflow_parameters[INDEX_TRIPAL_DATA] = {}
-
-            datamap = {}
-            datamap[INTERPRO_FILE] = {"src": "hda", "id": org_interproscan_hda_id}
-
-            with open(workflow_path, 'r') as ga_in_file:
-                # Store the decoded json dictionary
-                workflow_dict = json.load(ga_in_file)
-                workflow_name = workflow_dict["name"]
-
-                # Import the workflow in galaxy as a dict
-                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-                # Get its attributes
-                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
-                # Then get its ID (required to invoke the workflow)
-                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-                # Check that the imported workflow can be retrieved from the instance
-                try:
-                    instance.workflows.show_workflow(workflow_id=workflow_id)
-                    logging.debug("Workflow ID: %s" % workflow_id)
-                except bioblend.ConnectionError:
-                    logging.warning("Error finding workflow %s" % workflow_name)
-
-                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
-
-                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
-
-
-
-        if len(list(v.keys())) == 2:
-
-            logging.info("Input organism %s: 2 strain/sex combinations detected in input dictionary" % k)
-
-            # Set workflow path (2 organisms)
-            workflow_path = os.path.join(os.path.abspath(script_dir), "workflows_phaeoexplorer/Galaxy-Workflow-load_blast_results_2org_v1.ga")
-
-            # Instance object required variables
-            instance_url, email, password = None, None, None
-
-            # Set the galaxy instance variables
-            for k2, v2 in v.items():
-                instance_url = v2["instance_url"]
-                email = v2["email"]
-                password = v2["password"]
-
-            instance = galaxy.GalaxyInstance(url=instance_url, email=email, password=password)
-
-            # Check if the versions of tools specified in the workflow are installed in galaxy
-            install_changesets_revisions_from_workflow(workflow_path=workflow_path, instance=instance)
-
-            organisms_key_names = list(v.keys())
-            org1_dict = v[organisms_key_names[0]]
-            org2_dict = v[organisms_key_names[1]]
-
-            history_id = org1_dict["history_id"]
-
-            # Organism 1 attributes
-            org1_genus = org1_dict["genus"]
-            org1_species = org1_dict["species"]
-            org1_genus_species = org1_dict["genus_species"]
-            org1_species_folder_name = org1_dict["species_folder_name"]
-            org1_full_name = org1_dict["full_name"]
-            org1_strain = org1_dict["strain"]
-            org1_sex = org1_dict["sex"]
-            org1_org_id = org1_dict["org_id"]
-            org1_interproscan_analysis_id = org1_dict["interproscan_analysis_id"]
-            org1_interproscan_hda_id = org1_dict["hda_ids"]["interproscan_hda_id"]
-
-            # Store these values into a dict for parameters logging/validation
-            org1_parameters_dict = {
-                "org1_genus": org1_genus,
-                "org1_species": org1_species,
-                "org1_genus_species": org1_genus_species,
-                "org1_species_folder_name": org1_species_folder_name,
-                "org1_full_name": org1_full_name,
-                "org1_strain": org1_strain,
-                "org1_sex": org1_sex,
-                "org1_org_id": org1_org_id,
-                "org1_interproscan_analysis_id": org1_interproscan_analysis_id,
-                "org1_interproscan_hda_id": org1_interproscan_hda_id,
-            }
-
-
-            # Look for empty parameters values, throw a critical error if a parameter value is invalid
-            for param_name, param_value in org1_parameters_dict.items():
-                if param_value is None or param_value == "":
-                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org1_full_name, param_name, param_value))
-                    sys.exit()
-
-            # Organism 2 attributes
-            org2_genus = org2_dict["genus"]
-            org2_species = org2_dict["species"]
-            org2_genus_species = org2_dict["genus_species"]
-            org2_species_folder_name = org2_dict["species_folder_name"]
-            org2_full_name = org2_dict["full_name"]
-            org2_strain = org2_dict["strain"]
-            org2_sex = org2_dict["sex"]
-            org2_org_id = org2_dict["org_id"]
-            org2_interproscan_analysis_id = org2_dict["interproscan_analysis_id"]
-            org2_interproscan_hda_id = org2_dict["hda_ids"]["interproscan_hda_id"]
-
-            # Store these values into a dict for parameters logging/validation
-            org2_parameters_dict = {
-                "org2_genus": org2_genus,
-                "org2_species": org2_species,
-                "org2_genus_species": org2_genus_species,
-                "org2_species_folder_name": org2_species_folder_name,
-                "org2_full_name": org2_full_name,
-                "org2_strain": org2_strain,
-                "org2_sex": org2_sex,
-                "org2_org_id": org2_org_id,
-                "org2_interproscan_analysis_id": org2_interproscan_analysis_id,
-                "org2_interproscan_hda_id": org2_interproscan_hda_id,
-            }
-
-
-            # Look for empty parameters values, throw a critical error if a parameter value is invalid
-            for param_name, param_value in org2_parameters_dict.items():
-                if param_value is None or param_value == "":
-                    logging.critical("Empty parameter value found for organism {0} (parameter: {1}, parameter value: {2})".format(org2_full_name, param_name, param_value))
-                    sys.exit()
-
-            # Source files association (ordered by their IDs in the workflow)
-            # WARNING: Be very careful about how the workflow is "organized" (i.e the order of the steps/datasets, check the .ga if there is any error)
-            INTERPRO_FILE_ORG1 = "0"
-            INTERPRO_FILE_ORG2 = "1"
-            LOAD_INTERPRO_FILE_ORG1 = "2"
-            LOAD_INTERPRO_FILE_ORG2 = "3"
-            POPULATE_MAT_VIEWS = "4"
-            INDEX_TRIPAL_DATA = "5"
-
-            # Set the workflow parameters (individual tools runtime parameters in the workflow)
-            workflow_parameters = {}
-
-            # Input files have no parameters (they are set via assigning the hda IDs in the datamap parameter of the bioblend method)
-            workflow_parameters[INTERPRO_FILE_ORG1] = {}
-            workflow_parameters[INTERPRO_FILE_ORG2] = {}
-
-            # Organism 1
-            workflow_parameters[LOAD_INTERPRO_FILE_ORG1] = {"organism_id": org1_org_id,
-                                                          "analysis_id": org1_interproscan_analysis_id}
-
-            # Organism 2
-            workflow_parameters[LOAD_INTERPRO_FILE_ORG2] = {"organism_id": org2_org_id,
-                                                          "analysis_id": org2_interproscan_analysis_id}
-
-            workflow_parameters[POPULATE_MAT_VIEWS] = {}
-            workflow_parameters[INDEX_TRIPAL_DATA] = {}
-
-            # Set datamap (mapping of input files in the workflow)
-            datamap = {}
-
-            # Organism 1
-            datamap[INTERPRO_FILE_ORG1] = {"src": "hda", "id": org1_interproscan_hda_id}
-
-            # Organism 2
-            datamap[INTERPRO_FILE_ORG2] = {"src": "hda", "id": org2_interproscan_hda_id}
-
-            with open(workflow_path, 'r') as ga_in_file:
-                # Store the decoded json dictionary
-                workflow_dict = json.load(ga_in_file)
-                workflow_name = workflow_dict["name"]
-
-                # Import the workflow in galaxy as a dict
-                instance.workflows.import_workflow_dict(workflow_dict=workflow_dict)
-                # Get its attributes
-                workflow_attributes = instance.workflows.get_workflows(name=workflow_name)
-                # Then get its ID (required to invoke the workflow)
-                workflow_id = workflow_attributes[0]["id"]  # Index 0 is the most recently imported workflow (the one we want)
-                # Check that the imported workflow can be retrieved from the instance
-                try:
-                    instance.workflows.show_workflow(workflow_id=workflow_id)
-                    logging.debug("Workflow ID: %s" % workflow_id)
-                except bioblend.ConnectionError:
-                    logging.warning("Error finding workflow %s" % workflow_name)
-
-                # Finally, invoke the workflow along with its datamap, parameters and the history in which to invoke it
-                instance.workflows.invoke_workflow(workflow_id=workflow_id, history_id=history_id, params=workflow_parameters, inputs=datamap, allow_tool_state_corrections=True)
-
-                logging.info("Successfully imported and invoked workflow {0}, check the galaxy instance ({1}) for the jobs state".format(workflow_name, instance_url))
diff --git a/speciesData.py b/speciesData.py
index d8dd88c643d7216746ab3528c2ed228d9537cd3c..308478e4ef44a4cb3c60cb085ca0cda7b77aaa77 100755
--- a/speciesData.py
+++ b/speciesData.py
@@ -9,6 +9,27 @@ import constants
 
 from _datetime import datetime
 
+def clean_string(string):
+    if string is not None and string != "":
+        cleaned_string = string \
+            .replace(" ", "_") \
+            .replace("-", "_") \
+            .replace("(", "") \
+            .replace(")", "") \
+            .replace("'", "").strip()
+        return cleaned_string
+    else:
+        return string
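+
+# Illustrative usage (hypothetical input): clean_string("Ectocarpus sp. (male)") returns "Ectocarpus_sp._male"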
+
+def set_service_attribute(service, parameters_dictionary):
+    parameters_dictionary_services = parameters_dictionary[constants.ORG_PARAM_SERVICES]
+    service_value = "0"
+    if (service in parameters_dictionary_services.keys()
+            and parameters_dictionary_services[service] is not None
+            and parameters_dictionary_services[service] != ""):
+        service_value = parameters_dictionary_services[service]
+    return service_value
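+
+# Illustrative usage (hypothetical input): set_service_attribute("blast", {"services": {"blast": "1"}}) returns "1",
+# and "0" when the service is missing or empty in the input dictionary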
+
 class SpeciesData:
     """
     This class contains attributes and functions to interact with the galaxy container of the GGA environment
@@ -40,37 +61,20 @@ class SpeciesData:
             sys.exit(0)
         return 1
 
-    def clean_string(self, string):
-        if not string is None and string != "":
-            clean_string = string.replace(" ", "_").replace("-", "_").replace("(", "").replace(")", "").replace("'", "").strip()
-            return clean_string
-        else:
-            return string
-
-    def set_service_attribute(self, service, parameters_dictionary):
-        parameters_dictionary_services = parameters_dictionary[constants.ORG_PARAM_SERVICES]
-        service_value = "0"
-        if(service in parameters_dictionary_services.keys()
-                and parameters_dictionary_services[service] is not None
-                and parameters_dictionary_services[service] != ""):
-            service_value = parameters_dictionary_services[service]
-        return service_value
-
     def __init__(self, parameters_dictionary):
         self.parameters_dictionary = parameters_dictionary
         self.name = parameters_dictionary[constants.ORG_PARAM_NAME]
         parameters_dictionary_description=parameters_dictionary[constants.ORG_PARAM_DESC]
         parameters_dictionary_data = parameters_dictionary[constants.ORG_PARAM_DATA]
 
-        self.species = self.clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_SPECIES])
-        self.genus = self.clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_GENUS])
-        self.strain = self.clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_STRAIN])
-        self.sex = self.clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_SEX])
-        self.common = self.clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_COMMON_NAME])
+        self.species = clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_SPECIES])
+        self.genus = clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_GENUS])
+        self.strain = clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_STRAIN])
+        self.sex = clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_SEX])
+        self.common_name = clean_string(parameters_dictionary_description[constants.ORG_PARAM_DESC_COMMON_NAME])
 
         self.date = datetime.today().strftime("%Y-%m-%d")
         self.origin = parameters_dictionary_description[constants.ORG_PARAM_DESC_ORIGIN]
-        self.performed = parameters_dictionary_data[constants.ORG_PARAM_DATA_PERFORMED_BY]
 
         if parameters_dictionary_data[constants.ORG_PARAM_DATA_GENOME_VERSION] == "":
             self.genome_version = "1.0"
@@ -93,49 +97,43 @@ class SpeciesData:
         self.blastx_path = parameters_dictionary_data[constants.ORG_PARAM_DATA_BLASTX_PATH]
         self.orthofinder_path = parameters_dictionary_data[constants.ORG_PARAM_DATA_ORTHOFINDER_PATH]
 
-        self.blast = self.set_service_attribute(constants.ORG_PARAM_SERVICES_BLAST, parameters_dictionary)
-        self.go = self.set_service_attribute(constants.ORG_PARAM_SERVICES_GO, parameters_dictionary)
+        self.blast = set_service_attribute(constants.ORG_PARAM_SERVICES_BLAST, parameters_dictionary)
+        self.go = set_service_attribute(constants.ORG_PARAM_SERVICES_GO, parameters_dictionary)
 
         self.genus_lowercase = self.genus.lower()
         self.species_lowercase = self.species.lower()
+        self.strain_lowercase = self.strain.lower()
+        self.sex_lowercase = self.sex.lower()
+
         self.genus_uppercase = self.genus[0].upper() + self.genus_lowercase[1:]
         self.genus_species = "{0}_{1}".format(self.genus_lowercase, self.species_lowercase)
+        self.strain_sex = '_'.join(utilities.filter_empty_not_empty_items([self.strain_lowercase, self.sex_lowercase])["not_empty"])
 
-        self.chado_species_name = "{0} {1}".format(self.species, self.sex)
-        self.full_name = ' '.join(utilities.filter_empty_not_empty_items([self.genus_uppercase, self.species, self.strain, self.sex])["not_empty"])
+        self.full_name = ' '.join(utilities.filter_empty_not_empty_items([self.genus_uppercase, self.species_lowercase, self.strain, self.sex])["not_empty"])
         self.full_name_lowercase = self.full_name.lower()
-        self.abbreviation = "_".join(utilities.filter_empty_not_empty_items([self.genus_lowercase[0], self.species, self.strain, self.sex])["not_empty"])
 
         self.species_folder_name = "_".join(utilities.filter_empty_not_empty_items(
             [self.genus_lowercase, self.species_lowercase, self.strain.lower(),
              self.sex.lower()])["not_empty"])
 
-        self.dataset_prefix = None
-        if self.sex is not None or self.sex != "":
-            self.dataset_prefix = self.genus[0].lower() + "_" + self.species_lowercase + "_" + self.sex[0].lower()
-        else:
-            self.dataset_prefix = self.genus[0].lower() + "_" + self.species_lowercase
+        self.dataset_prefix = self.strain_sex
 
-        # Bioblend/Chado IDs for an organism analyses/organisms/datasets/history/library
-        self.org_id = None
-        self.genome_analysis_id = None
-        self.ogs_analysis_id = None
-        self.instance_url = None
-        self.instance = None
-        self.history_id = None
-        self.library = None
-        self.library_id = None
+        self.genome_filename = "{0}_v{1}.fasta".format(self.dataset_prefix, self.genome_version)
+        self.gff_filename = "{0}_OGS{1}_{2}.gff".format(self.dataset_prefix, self.ogs_version, constants.DATA_DATE.replace("-",""))
+        self.transcripts_filename = "{0}_OGS{1}_{2}".format(self.dataset_prefix, self.ogs_version, constants.FILENAME_SUFFIX_TRANSCRIPTS)
+        self.proteins_filename = "{0}_OGS{1}_{2}".format(self.dataset_prefix, self.ogs_version, constants.FILENAME_SUFFIX_PROTEINS)
+        self.interpro_filename = "{0}_OGS{1}_{2}".format(self.dataset_prefix, self.ogs_version, constants.FILENAME_SUFFIX_INTERPRO)
+        self.blastp_filename = "{0}_OGS{1}_{2}".format(self.dataset_prefix, self.ogs_version, constants.FILENAME_SUFFIX_BLASTP)
+        self.blastx_filename = "{0}_OGS{1}_{2}".format(self.dataset_prefix, self.ogs_version, constants.FILENAME_SUFFIX_BLASTX)
+        self.orthofinder_filename = "{0}_OGS{1}_{2}".format(self.dataset_prefix, self.ogs_version, constants.FILENAME_SUFFIX_ORTHOFINDER)
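+        # Illustrative example (hypothetical values): with strain "ec32", sex "male",
+        # genome_version "1.0" and ogs_version "1.0", dataset_prefix is "ec32_male",
+        # genome_filename is "ec32_male_v1.0.fasta" and gff_filename is
+        # "ec32_male_OGS1.0_20210224.gff" (constants.DATA_DATE with dashes removed).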
 
         self.script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
         self.main_dir = None
         self.species_dir = None
-
-        self.tool_panel = None
-        self.datasets = dict()
-        self.datasets_name = dict()
-        self.source_files = dict()
-        self.workflow_name = None
-        self.metadata = dict()
-        self.api_key = None  # API key used to communicate with the galaxy instance. Cannot be used to do user-tied actions
-        self.datasets = dict()
         self.config = None  # Custom config used to set environment variables inside containers
+
+        # Bioblend objects and IDs used to interact with the species' Galaxy instance and history
+        self.instance_url = None
+        self.instance = None
+        self.history_id = None
+        self.history_name = str(self.genus_species)
diff --git a/templates/gspecies_compose.yml.j2 b/templates/gspecies_compose.yml.j2
index 4f4300c2f7c351690160e2e3b52b06082eb88a85..e3cd0164a682db130bc0f0edf70aebef178e165b 100644
--- a/templates/gspecies_compose.yml.j2
+++ b/templates/gspecies_compose.yml.j2
@@ -144,6 +144,7 @@ services:
             GALAXY_DEFAULT_ADMIN_EMAIL: "{{ galaxy_default_admin_email }}"
             GALAXY_DEFAULT_ADMIN_USER: "{{ galaxy_default_admin_user }}"
             GALAXY_DEFAULT_ADMIN_PASSWORD: "{{ galaxy_default_admin_password }}"
+            GALAXY_DEFAULT_ADMIN_KEY: "{{ galaxy_default_admin_key }}"
             GALAXY_CONFIG_ADMIN_USERS: "admin@galaxy.org,{{ galaxy_default_admin_email }}"   # admin@galaxy.org is the default (leave it), galaxy_default_admin_email is a shared ldap user we use to connect
             ENABLE_FIX_PERMS: 0
             PROXY_PREFIX: /sp/{{ genus_species }}/galaxy
diff --git a/templates/organisms.yml.j2 b/templates/organisms.yml.j2
index fc932ed4b5d3d57ad739f75db031bc2e3d7c5944..429188d1746e78c0ac4c02be7bde2f837d46a26c 100644
--- a/templates/organisms.yml.j2
+++ b/templates/organisms.yml.j2
@@ -22,7 +22,6 @@
     {{ org_param_data_blastx_path }}: {{ org_param_data_blastx_path_value }}
     {{ org_param_data_genome_version }}: {{ org_param_data_genome_version_value }}
     {{ org_param_data_ogs_version }}: {{ org_param_data_ogs_version_value }}
-    {{ org_param_data_performed_by }}: {{ org_param_data_performed_by_value }}
   {{ org_param_services }}:
     {{ org_param_services_blast }}: {{ org_param_services_blast_value }}
     {{ org_param_services_go }}: {{ org_param_services_go_value }}
\ No newline at end of file
diff --git a/utilities.py b/utilities.py
index fa357ca2356afe1271b98fd85f082833ae541814..62da00a6b4f9bffded6c1cacf9b72abe036f6636 100755
--- a/utilities.py
+++ b/utilities.py
@@ -4,9 +4,6 @@
 import yaml
 import logging
 import sys
-import os
-import subprocess
-import bioblend
 import constants
 
 def load_yaml(yaml_file):
@@ -79,47 +76,13 @@ def filter_empty_not_empty_items(li):
             filtered_dict["not_empty"].append(i)
     return filtered_dict
 
+def no_empty_items(li):
 
-def check_galaxy_state(genus_lowercase, species, script_dir):
-    """
-    Read the logs of the galaxy container for the current species to check if the service is "ready"
-
-    :param genus_lowercase:
-    :param species:
-    :param script_dir:
-    :return:
-    """
-
-    # Run supervisorctl status in the galaxy container via serexec
-    # Change serexec permissions in repo
-    try:
-        os.chmod("%s/serexec" % script_dir, 0o0755)
-    except PermissionError:
-        logging.warning("serexec permissions incorrect in %s" % script_dir)
-    galaxy_logs = subprocess.run(["%s/serexec" % script_dir, "{0}_{1}_galaxy".format(genus_lowercase, species),
-                                  "supervisorctl", "status", "galaxy:"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    if "galaxy:galaxy_web                RUNNING" in str(galaxy_logs.stdout) \
-            and "galaxy:handler0                  RUNNING" in str(galaxy_logs.stdout) \
-            and "galaxy:handler1                  RUNNING" in str(galaxy_logs.stdout):
-        return 1
-    else:
-        return 0
-
-
-def get_species_history_id(instance, full_name):
-    """
-    Set and return the current species history id in its galaxy instance
-
-    :param instance:
-    :param full_name:
-    :return:
-    """
-
-    histories = instance.histories.get_histories(name=str(full_name))
-    history_id = histories[0]["id"]
-    show_history = instance.histories.show_history(history_id=history_id)
-
-    return [history_id, show_history]
+    no_empty = True
+    for i in li:
+        if i is None or i == "":
+            no_empty = False
+    return no_empty
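+
+# Illustrative examples (not part of the original code):
+#   no_empty_items(["org1", "v1.0"])  ->  True
+#   no_empty_items(["org1", ""])      ->  False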
 
 def get_gspecies_string_from_sp_dict(sp_dict):
 
@@ -147,7 +110,6 @@ def get_unique_species_str_list(sp_dict_list):
                 
     return unique_species_li
 
-
 def get_unique_species_dict_list(sp_dict_list):
     """
     Filter the species dictionary list to return only unique genus_species combinations
@@ -189,44 +151,6 @@ def get_sp_picture(sp_dict_list):
                 sp_picture_dict[gspecies] = sp[constants.ORG_PARAM_DESC][constants.ORG_PARAM_DESC_PICTURE_PATH]
     return sp_picture_dict
 
-def get_sp_jbrowse_links(org_list):
-    """
-    Get the jbrowse links from all strains for each species
-    """
-
-    jbrowse_links_dict = {}
-
-    for org in org_list:
-        gspecies = org.genus_species
-        if org.contig_prefix != "":
-            if gspecies not in jbrowse_links_dict.keys():
-                jbrowse_links_dict[gspecies] = org.contig_prefix + ">" + org.species_folder_name
-            else:
-                jbrowse_links_dict[gspecies] = jbrowse_links_dict[gspecies] + ";" + org.contig_prefix + ">" + org.species_folder_name
-
-    return jbrowse_links_dict
-
-def run_tool(instance, tool_id, history_id, tool_inputs):
-
-    output_dict = None
-    try:
-        logging.debug("Running tool {0} with tool inputs: {1}".format(tool_id, tool_inputs))
-        output_dict =  instance.tools.run_tool(
-            tool_id=tool_id,
-            history_id=history_id,
-            tool_inputs=tool_inputs)
-    except bioblend.ConnectionError:
-        logging.error("Unexpected HTTP response (bioblend.ConnectionError) when running tool {0} with tool inputs: {1}".format(tool_id, tool_inputs))
-
-    return output_dict
-
-def run_tool_and_get_single_output_dataset_id(instance, tool_id, history_id, tool_inputs):
-
-    output_dict = run_tool(instance, tool_id, history_id, tool_inputs)
-    single_output_dataset_id = output_dict["outputs"][0]["id"]
-
-    return single_output_dataset_id
-
 def create_org_param_dict_from_constants():
     """
     Create a dictionary of variables containing the keys needed to render the organisms.yml.j2 (NOT the values)
@@ -256,7 +180,6 @@ def create_org_param_dict_from_constants():
     org_param_dict["org_param_data_blastx_path"] = constants.ORG_PARAM_DATA_BLASTX_PATH
     org_param_dict["org_param_data_genome_version"] = constants.ORG_PARAM_DATA_GENOME_VERSION
     org_param_dict["org_param_data_ogs_version"] = constants.ORG_PARAM_DATA_OGS_VERSION
-    org_param_dict["org_param_data_performed_by"] = constants.ORG_PARAM_DATA_PERFORMED_BY
     org_param_dict["org_param_services"] = constants.ORG_PARAM_SERVICES
     org_param_dict["org_param_services_blast"] = constants.ORG_PARAM_SERVICES_BLAST
     org_param_dict["org_param_services_go"] = constants.ORG_PARAM_SERVICES_GO
diff --git a/utilities_bioblend.py b/utilities_bioblend.py
new file mode 100644
index 0000000000000000000000000000000000000000..08bb1bd8450793a2bb76b0eaca3675e524e31946
--- /dev/null
+++ b/utilities_bioblend.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+import sys
+import os
+import subprocess
+import time
+import json
+import bioblend
+from bioblend import galaxy
+
+import utilities
+
+def get_galaxy_instance(instance_url, email, password):
+    """
+    Test the connection to the galaxy instance for the current organism
+    Exit if we cannot connect to the instance
+
+    """
+
+    logging.info("Connecting to the galaxy instance (%s)" % instance_url)
+    instance = galaxy.GalaxyInstance(url=instance_url,
+                                     email=email,
+                                     password=password)
+
+    try:
+        instance.histories.get_histories()
+    except bioblend.ConnectionError:
+        logging.critical("Cannot connect to galaxy instance (%s) " % instance_url)
+        sys.exit()
+    else:
+        logging.info("Successfully connected to galaxy instance (%s) " % instance_url)
+
+    return instance
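+
+# Illustrative usage (hypothetical URL and credentials):
+#   instance = get_galaxy_instance("http://localhost/sp/genus_species/galaxy", "admin@example.org", "password")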
+
+
+def get_history(instance, history_name):
+    """
+    Create the working history for the current species if it does not exist,
+    otherwise reuse the existing one.
+
+    :return: history_id
+    """
+    histories = instance.histories.get_histories(name=str(history_name))
+    if len(histories) == 0:
+        logging.info("Creating history for %s" % history_name)
+        hist_dict = instance.histories.create_history(name=str(history_name))
+        history_id = hist_dict["id"]
+        logging.debug("History ID set for {0}: {1}".format(history_name, history_id))
+    elif len(histories) == 1:
+        history_id = histories[0]["id"]
+        logging.debug("History ID set for {0}: {1}".format(history_name, history_id))
+    else:
+        logging.critical("Multiple histories exists for {0}".format(history_name))
+
+    return history_id
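+
+# Illustrative usage (hypothetical history name): history_id = get_history(instance, "genus_species")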
+
+def check_wf_param(full_name, params):
+
+    if not utilities.no_empty_items(params):
+        logging.critical(
+            "One empty workflow parameter found for organism {0}: {1})".format(full_name, params))
+        sys.exit()
+
+def check_galaxy_state(network_name, script_dir):
+    """
+    Run "supervisorctl status" inside the galaxy container for the current species to check whether the service is "ready"
+    """
+
+    # Run supervisorctl status in the galaxy container via serexec
+    # Change serexec permissions in repo
+    try:
+        os.chmod("%s/serexec" % script_dir, 0o0755)
+    except PermissionError:
+        logging.warning("serexec permissions incorrect in %s" % script_dir)
+    galaxy_logs = subprocess.run(["%s/serexec" % script_dir, "{0}_galaxy".format(network_name),
+                                  "supervisorctl", "status", "galaxy:"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    if "galaxy:galaxy_web                RUNNING" in str(galaxy_logs.stdout) \
+            and "galaxy:handler0                  RUNNING" in str(galaxy_logs.stdout) \
+            and "galaxy:handler1                  RUNNING" in str(galaxy_logs.stdout):
+        return 1
+    else:
+        return 0
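+
+# Illustrative usage (hypothetical network name): check_galaxy_state("genus_species", script_dir)
+# returns 1 when the galaxy web and handler processes all report RUNNING, 0 otherwise.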
+
+def get_species_history_id(instance, full_name):
+    """
+    Set and return the current species history id in its galaxy instance
+
+    :param instance:
+    :param full_name:
+    :return:
+    """
+
+    histories = instance.histories.get_histories(name=str(full_name))
+    history_id = histories[0]["id"]
+    show_history = instance.histories.show_history(history_id=history_id)
+
+    return [history_id, show_history]
+
+
+def run_tool(instance, tool_id, history_id, tool_inputs):
+
+    output_dict = None
+    try:
+        logging.debug("Running tool {0} with tool inputs: {1}".format(tool_id, tool_inputs))
+        output_dict = instance.tools.run_tool(
+            tool_id=tool_id,
+            history_id=history_id,
+            tool_inputs=tool_inputs)
+    except bioblend.ConnectionError:
+        logging.error("Unexpected HTTP response (bioblend.ConnectionError) when running tool {0} with tool inputs: {1}".format(tool_id, tool_inputs))
+
+    return output_dict
+
+def run_tool_and_download_single_output_dataset(instance, tool_id, history_id, tool_inputs, time_sleep=0):
+
+    output_dict = run_tool(instance, tool_id, history_id, tool_inputs)
+    if time_sleep is not None:
+        time.sleep(time_sleep)
+    single_output_dataset_id = output_dict["outputs"][0]["id"]
+    dataset = instance.datasets.download_dataset(dataset_id=single_output_dataset_id)
+
+    return dataset
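+
+# Illustrative usage (hypothetical inputs; the tool id is GET_ORGANISMS_TOOL from constants.py):
+#   dataset = run_tool_and_download_single_output_dataset(
+#       instance,
+#       "toolshed.g2.bx.psu.edu/repos/gga/chado_organism_get_organisms/organism_get_organisms/2.3.4+galaxy0",
+#       history_id, tool_inputs={}, time_sleep=10)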
+
+def install_repository_revision(instance, tool_id, version, changeset_revision):
+
+    tool_dict = instance.tools.show_tool(tool_id)
+    current_version = tool_dict["version"]
+    toolshed_dict = tool_dict["tool_shed_repository"]
+
+    if current_version != version:
+        name = toolshed_dict["name"]
+        owner = toolshed_dict["owner"]
+        toolshed = "https://" + toolshed_dict["tool_shed"]
+        logging.warning("Installing changeset revision {0} for {1}".format(changeset_revision, name))
+
+        instance.toolshed.install_repository_revision(tool_shed_url=toolshed,
+                                                      name=name,
+                                                      owner=owner,
+                                                      changeset_revision=changeset_revision,
+                                                      install_tool_dependencies=True,
+                                                      install_repository_dependencies=False,
+                                                      install_resolver_dependencies=True)
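+
+# Illustrative usage (values taken from the workflow .ga files added below):
+#   install_repository_revision(instance,
+#                               "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+#                               "2.3.4+galaxy0", "ba4d07fbaf47")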
+
+def install_workflow_tools(instance, workflow_path):
+    """
+    Read a .ga file to extract the information about the different tools it calls.
+    Check that every tool is installed via a "show_tool" call.
+    If a tool is not installed (i.e. the installed version does not match the workflow version),
+    log a warning and install the required changeset revision (matching the tool version).
+    Does nothing if the versions already match.
+
+    :return:
+    """
+
+    logging.info("Validating that installed tools versions and changesets match workflow versions")
+
+    # Load the workflow file (.ga) in a buffer
+    with open(workflow_path, 'r') as ga_in_file:
+
+        # Then store the decoded json dictionary
+        workflow_dict = json.load(ga_in_file)
+
+        # Look up every "step_id" looking for tools
+        for step in workflow_dict["steps"].values():
+            if step["tool_id"]:
+                # Check if an installed version matches the workflow tool version
+                # (If it's not installed, the show_tool version returned will be a default version with the suffix "XXXX+0")
+                install_repository_revision(tool_id=step["tool_id"],
+                                            version=step["tool_version"],
+                                            changeset_revision=step["tool_shed_repository"]["changeset_revision"],
+                                            instance=instance)
+
+    logging.info("Tools versions and changeset_revisions from workflow validated")
diff --git a/workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_1org_v6.ga b/workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_1org_v6.ga
new file mode 100644
index 0000000000000000000000000000000000000000..d9dae223daa8783f6dff89a5b1a7725b324613ed
--- /dev/null
+++ b/workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_1org_v6.ga
@@ -0,0 +1,519 @@
+{
+    "a_galaxy_workflow": "true",
+    "annotation": "",
+    "format-version": "0.1",
+    "name": "chado_load_tripal_synchronize_jbrowse_1org_v6",
+    "steps": {
+        "0": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 0,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "genome"
+                }
+            ],
+            "label": "genome",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 365.8000030517578,
+                "height": 61.80000305175781,
+                "left": 176.5,
+                "right": 376.5,
+                "top": 304,
+                "width": 200,
+                "x": 176.5,
+                "y": 304
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "40c1e307-2cc6-4914-aebc-76373131708d",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "42803cc3-a5cc-44e9-b6c8-2456472579cd"
+                }
+            ]
+        },
+        "1": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 1,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "annotations"
+                }
+            ],
+            "label": "annotations",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 455.8000030517578,
+                "height": 61.80000305175781,
+                "left": 207.5,
+                "right": 407.5,
+                "top": 394,
+                "width": 200,
+                "x": 207.5,
+                "y": 394
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "ffb56c28-197b-4029-a678-2108ba95f21f",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "9aa9cb17-a004-4287-abf4-6ca2c2ad7b66"
+                }
+            ]
+        },
+        "2": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 2,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "proteins"
+                }
+            ],
+            "label": "proteins",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 544.8000030517578,
+                "height": 61.80000305175781,
+                "left": 229.5,
+                "right": 429.5,
+                "top": 483,
+                "width": 200,
+                "x": 229.5,
+                "y": 483
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "018cd90b-1f34-4d37-9919-7767c8c3026d",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "8623782b-77e2-4eea-bb20-0cf158a503ff"
+                }
+            ]
+        },
+        "3": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+            "errors": null,
+            "id": 3,
+            "input_connections": {
+                "fasta": {
+                    "id": 0,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "analysis_id"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "organism"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "wait_for"
+                }
+            ],
+            "label": null,
+            "name": "Chado load fasta",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "json"
+                }
+            ],
+            "position": {
+                "bottom": 445,
+                "height": 144,
+                "left": 506.5,
+                "right": 706.5,
+                "top": 301,
+                "width": 200,
+                "x": 506.5,
+                "y": 301
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "ba4d07fbaf47",
+                "name": "chado_feature_load_fasta",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"do_update\": \"false\", \"ext_db\": {\"db\": \"\", \"re_db_accession\": \"\"}, \"fasta\": {\"__class__\": \"ConnectedValue\"}, \"match_on_name\": \"false\", \"organism\": {\"__class__\": \"RuntimeValue\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"re_name\": \"\", \"re_uniquename\": \"\", \"relationships\": {\"rel_type\": \"none\", \"__current_case__\": 0}, \"sequence_type\": \"contig\", \"wait_for\": {\"__class__\": \"RuntimeValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "59bb1c33-b93f-49db-941d-66cbdaaf2cb5",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "cff733d9-3e76-417e-95e2-7d4e2111ff59"
+                }
+            ]
+        },
+        "4": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0",
+            "errors": null,
+            "id": 4,
+            "input_connections": {
+                "reference_genome|genome": {
+                    "id": 0,
+                    "output_name": "output"
+                },
+                "track_groups_0|data_tracks_0|data_format|annotation": {
+                    "id": 1,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool JBrowse",
+                    "name": "reference_genome"
+                }
+            ],
+            "label": null,
+            "name": "JBrowse",
+            "outputs": [
+                {
+                    "name": "output",
+                    "type": "html"
+                }
+            ],
+            "position": {
+                "bottom": 681.1999969482422,
+                "height": 205.1999969482422,
+                "left": 493.5,
+                "right": 693.5,
+                "top": 476,
+                "width": 200,
+                "x": 493.5,
+                "y": 476
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "4542035c1075",
+                "name": "jbrowse",
+                "owner": "iuc",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"RuntimeValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"RuntimeValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"true\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"ec32_ortholog_description,note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": \"__MENU_URL_ORG__\", \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_on\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "212e883c-daeb-4b4f-9bed-c90f130da794",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "38052de9-61ed-4cdd-8a11-1b1d6dd8a8e6"
+                }
+            ]
+        },
+        "5": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_gff/feature_load_gff/2.3.4+galaxy0",
+            "errors": null,
+            "id": 5,
+            "input_connections": {
+                "fasta": {
+                    "id": 2,
+                    "output_name": "output"
+                },
+                "gff": {
+                    "id": 1,
+                    "output_name": "output"
+                },
+                "wait_for": {
+                    "id": 3,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "analysis_id"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "organism"
+                }
+            ],
+            "label": null,
+            "name": "Chado load gff",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 472.3999938964844,
+                "height": 174.39999389648438,
+                "left": 783.5,
+                "right": 983.5,
+                "top": 298,
+                "width": 200,
+                "x": 783.5,
+                "y": 298
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_gff/feature_load_gff/2.3.4+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "e9a6d7568817",
+                "name": "chado_feature_load_gff",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"add_only\": \"false\", \"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"fasta\": {\"__class__\": \"ConnectedValue\"}, \"gff\": {\"__class__\": \"ConnectedValue\"}, \"landmark_type\": \"contig\", \"no_seq_compute\": \"false\", \"organism\": {\"__class__\": \"RuntimeValue\"}, \"prot_naming\": {\"method\": \"regex\", \"__current_case__\": 1, \"re_protein_capture\": \"^mRNA(_.+)$\", \"re_protein\": \"prot\\\\1\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "7b88e06e-9ab1-4201-a947-e12bb3d2b901",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "d82c5b1c-323e-466e-b6af-8524f87c09ab"
+                }
+            ]
+        },
+        "6": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/jbrowse_to_container/jbrowse_to_container/0.5.1",
+            "errors": null,
+            "id": 6,
+            "input_connections": {
+                "organisms_0|jbrowse": {
+                    "id": 4,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [],
+            "label": null,
+            "name": "Add organisms to JBrowse container",
+            "outputs": [
+                {
+                    "name": "output",
+                    "type": "html"
+                }
+            ],
+            "position": {
+                "bottom": 640,
+                "height": 134,
+                "left": 779.5,
+                "right": 979.5,
+                "top": 506,
+                "width": 200,
+                "x": 779.5,
+                "y": 506
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/jbrowse_to_container/jbrowse_to_container/0.5.1",
+            "tool_shed_repository": {
+                "changeset_revision": "11033bdad2ca",
+                "name": "jbrowse_to_container",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"organisms\": [{\"__index__\": 0, \"jbrowse\": {\"__class__\": \"ConnectedValue\"}, \"name\": \"__DISPLAY_NAME_ORG__\", \"advanced\": {\"unique_id\": \"__UNIQUE_ID_ORG__\"}}], \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "ff93af13-b389-44c7-9a37-e39f216f3619",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "97986774-449c-4019-9273-aae3f461ff0c"
+                }
+            ]
+        },
+        "7": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_feature_sync/feature_sync/3.2.1.0",
+            "errors": null,
+            "id": 7,
+            "input_connections": {
+                "wait_for": {
+                    "id": 5,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Synchronize features",
+                    "name": "organism_id"
+                }
+            ],
+            "label": null,
+            "name": "Synchronize features",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 459.3999938964844,
+                "height": 154.39999389648438,
+                "left": 1065.5,
+                "right": 1265.5,
+                "top": 305,
+                "width": 200,
+                "x": 1065.5,
+                "y": 305
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_feature_sync/feature_sync/3.2.1.0",
+            "tool_shed_repository": {
+                "changeset_revision": "64e36c3f0dd6",
+                "name": "tripal_feature_sync",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"organism_id\": {\"__class__\": \"RuntimeValue\"}, \"repeat_ids\": [], \"repeat_types\": [{\"__index__\": 0, \"types\": \"mRNA\"}, {\"__index__\": 1, \"types\": \"polypeptide\"}], \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "281d7aef-81e2-437b-8080-e9c451d67f5b",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "33bcb916-0427-475f-89d3-4928ae3c0708"
+                }
+            ]
+        },
+        "8": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
+            "errors": null,
+            "id": 8,
+            "input_connections": {
+                "wait_for": {
+                    "id": 7,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [],
+            "label": null,
+            "name": "Populate materialized views",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 458.3999938964844,
+                "height": 154.39999389648438,
+                "left": 1351.5,
+                "right": 1551.5,
+                "top": 304,
+                "width": 200,
+                "x": 1351.5,
+                "y": 304
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
+            "tool_shed_repository": {
+                "changeset_revision": "3c08f32a3dc1",
+                "name": "tripal_db_populate_mviews",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"mview\": \"\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "cf37a05e-8125-491b-98be-d8f4df295e16",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "3c0dbcb4-f832-4293-8c35-ce427b9ec28a"
+                }
+            ]
+        },
+        "9": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
+            "errors": null,
+            "id": 9,
+            "input_connections": {
+                "wait_for": {
+                    "id": 8,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [],
+            "label": null,
+            "name": "Index Tripal data",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 417.6000061035156,
+                "height": 113.60000610351562,
+                "left": 1637.5,
+                "right": 1837.5,
+                "top": 304,
+                "width": 200,
+                "x": 1637.5,
+                "y": 304
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
+            "tool_shed_repository": {
+                "changeset_revision": "d55a39f12dda",
+                "name": "tripal_db_index",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"expose\": {\"do_expose\": \"no\", \"__current_case__\": 0}, \"queues\": \"10\", \"table\": {\"mode\": \"website\", \"__current_case__\": 0}, \"tokenizer\": \"standard\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": null,
+            "type": "tool",
+            "uuid": "ee3698f2-3c4e-456f-8c42-f1ad2aa4feb5",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "019d0451-503e-406a-97e6-32126b7e5edb"
+                }
+            ]
+        }
+    },
+    "tags": [],
+    "uuid": "167e6d35-4e56-41bc-a324-7f5abb56800f",
+    "version": 1
+}
\ No newline at end of file
diff --git a/workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_2org_v6.ga b/workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_2org_v6.ga
new file mode 100644
index 0000000000000000000000000000000000000000..854b4b40bc14102c4bd5c488665867348bea5339
--- /dev/null
+++ b/workflows_phaeoexplorer/Galaxy-Workflow-chado_load_tripal_synchronize_jbrowse_2org_v6.ga
@@ -0,0 +1,887 @@
+{
+    "a_galaxy_workflow": "true",
+    "annotation": "",
+    "format-version": "0.1",
+    "name": "chado_load_tripal_synchronize_jbrowse_2org_v6",
+    "steps": {
+        "0": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 0,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "genome org1"
+                }
+            ],
+            "label": "genome org1",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 191.3000030517578,
+                "height": 61.80000305175781,
+                "left": 286,
+                "right": 486,
+                "top": 129.5,
+                "width": 200,
+                "x": 286,
+                "y": 129.5
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "8e998341-e4df-4f2d-961b-092c49694fe1",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "f94468ea-f18d-4a47-a3ca-cf64ffffe6bc"
+                }
+            ]
+        },
+        "1": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 1,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "annotations org1"
+                }
+            ],
+            "label": "annotations org1",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 281.3000030517578,
+                "height": 61.80000305175781,
+                "left": 285,
+                "right": 485,
+                "top": 219.5,
+                "width": 200,
+                "x": 285,
+                "y": 219.5
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "f4839799-3f48-410d-ada5-d526157b0790",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "52faf78c-7375-4472-8793-48cca3054ec1"
+                }
+            ]
+        },
+        "2": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 2,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "proteins org1"
+                }
+            ],
+            "label": "proteins org1",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 376.3000030517578,
+                "height": 61.80000305175781,
+                "left": 286,
+                "right": 486,
+                "top": 314.5,
+                "width": 200,
+                "x": 286,
+                "y": 314.5
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "75725990-7e8c-4d3a-9118-22969d43326c",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "571065f4-e28d-49a1-8f8e-0398eade54d4"
+                }
+            ]
+        },
+        "3": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 3,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "genome org2"
+                }
+            ],
+            "label": "genome org2",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 466.3000030517578,
+                "height": 61.80000305175781,
+                "left": 285,
+                "right": 485,
+                "top": 404.5,
+                "width": 200,
+                "x": 285,
+                "y": 404.5
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "deb8d58d-17b0-4c1e-8664-f2b201ed9232",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "ffd90c78-5953-461d-ad39-722bc3fa9ad7"
+                }
+            ]
+        },
+        "4": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 4,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "annotations org2"
+                }
+            ],
+            "label": "annotations org2",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 560.3000030517578,
+                "height": 61.80000305175781,
+                "left": 287,
+                "right": 487,
+                "top": 498.5,
+                "width": 200,
+                "x": 287,
+                "y": 498.5
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "208ee21d-2992-4bad-859b-7c0727420dd1",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "b7e1cc94-80de-4aa2-b7ab-ae905cd8bedc"
+                }
+            ]
+        },
+        "5": {
+            "annotation": "",
+            "content_id": null,
+            "errors": null,
+            "id": 5,
+            "input_connections": {},
+            "inputs": [
+                {
+                    "description": "",
+                    "name": "proteins org2"
+                }
+            ],
+            "label": "proteins org2",
+            "name": "Input dataset",
+            "outputs": [],
+            "position": {
+                "bottom": 650.3000030517578,
+                "height": 61.80000305175781,
+                "left": 288,
+                "right": 488,
+                "top": 588.5,
+                "width": 200,
+                "x": 288,
+                "y": 588.5
+            },
+            "tool_id": null,
+            "tool_state": "{\"optional\": false}",
+            "tool_version": null,
+            "type": "data_input",
+            "uuid": "319e7ffb-1e28-4439-b044-9d86b38454dd",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "d1afa940-3d0c-4838-9255-b024b8c0bc86"
+                }
+            ]
+        },
+        "6": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+            "errors": null,
+            "id": 6,
+            "input_connections": {
+                "fasta": {
+                    "id": 0,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "analysis_id"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "fasta"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "organism"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "wait_for"
+                }
+            ],
+            "label": "Chado load fasta org1",
+            "name": "Chado load fasta",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "json"
+                }
+            ],
+            "position": {
+                "bottom": 293.8999938964844,
+                "height": 164.39999389648438,
+                "left": 571,
+                "right": 771,
+                "top": 129.5,
+                "width": 200,
+                "x": 571,
+                "y": 129.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "ba4d07fbaf47",
+                "name": "chado_feature_load_fasta",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"do_update\": \"false\", \"ext_db\": {\"db\": \"\", \"re_db_accession\": \"\"}, \"fasta\": {\"__class__\": \"RuntimeValue\"}, \"match_on_name\": \"false\", \"organism\": {\"__class__\": \"RuntimeValue\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"re_name\": \"\", \"re_uniquename\": \"\", \"relationships\": {\"rel_type\": \"none\", \"__current_case__\": 0}, \"sequence_type\": \"contig\", \"wait_for\": {\"__class__\": \"RuntimeValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "2.3.4+galaxy0",
+            "type": "tool",
+            "uuid": "10d4a784-047d-441c-9b0e-4bcd36ac01ca",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "711bfdfc-0bb9-4a65-a7b9-86a3af339e00"
+                }
+            ]
+        },
+        "7": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0",
+            "errors": null,
+            "id": 7,
+            "input_connections": {
+                "reference_genome|genome": {
+                    "id": 0,
+                    "output_name": "output"
+                },
+                "track_groups_0|data_tracks_0|data_format|annotation": {
+                    "id": 1,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [],
+            "label": "JBrowse org1",
+            "name": "JBrowse",
+            "outputs": [
+                {
+                    "name": "output",
+                    "type": "html"
+                }
+            ],
+            "position": {
+                "bottom": 854.6999969482422,
+                "height": 205.1999969482422,
+                "left": 584,
+                "right": 784,
+                "top": 649.5,
+                "width": 200,
+                "x": 584,
+                "y": 649.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "4542035c1075",
+                "name": "jbrowse",
+                "owner": "iuc",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"ConnectedValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"ConnectedValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"true\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"ec32_ortholog_description,note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": \"__MENU_URL_ORG1__\", \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_on\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "1.16.11+galaxy0",
+            "type": "tool",
+            "uuid": "4ca62273-7b39-4c29-84c1-53e92b776778",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "37da6f04-24bf-481d-b8be-70a2eedad236"
+                }
+            ]
+        },
+        "8": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0",
+            "errors": null,
+            "id": 8,
+            "input_connections": {
+                "reference_genome|genome": {
+                    "id": 3,
+                    "output_name": "output"
+                },
+                "track_groups_0|data_tracks_0|data_format|annotation": {
+                    "id": 4,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [],
+            "label": "JBrowse org2",
+            "name": "JBrowse",
+            "outputs": [
+                {
+                    "name": "output",
+                    "type": "html"
+                }
+            ],
+            "position": {
+                "bottom": 1072.6999969482422,
+                "height": 205.1999969482422,
+                "left": 589,
+                "right": 789,
+                "top": 867.5,
+                "width": 200,
+                "x": 589,
+                "y": 867.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/iuc/jbrowse/jbrowse/1.16.11+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "4542035c1075",
+                "name": "jbrowse",
+                "owner": "iuc",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"action\": {\"action_select\": \"create\", \"__current_case__\": 0}, \"gencode\": \"1\", \"jbgen\": {\"defaultLocation\": \"\", \"trackPadding\": \"20\", \"shareLink\": \"true\", \"aboutDescription\": \"\", \"show_tracklist\": \"true\", \"show_nav\": \"true\", \"show_overview\": \"true\", \"show_menu\": \"true\", \"hideGenomeOptions\": \"false\"}, \"plugins\": {\"BlastView\": \"true\", \"ComboTrackSelector\": \"false\", \"GCContent\": \"false\"}, \"reference_genome\": {\"genome_type_select\": \"history\", \"__current_case__\": 1, \"genome\": {\"__class__\": \"ConnectedValue\"}}, \"standalone\": \"minimal\", \"track_groups\": [{\"__index__\": 0, \"category\": \"Annotation\", \"data_tracks\": [{\"__index__\": 0, \"data_format\": {\"data_format_select\": \"gene_calls\", \"__current_case__\": 2, \"annotation\": {\"__class__\": \"ConnectedValue\"}, \"match_part\": {\"match_part_select\": \"false\", \"__current_case__\": 1}, \"index\": \"true\", \"track_config\": {\"track_class\": \"NeatHTMLFeatures/View/Track/NeatFeatures\", \"__current_case__\": 3, \"html_options\": {\"topLevelFeatures\": \"\"}}, \"jbstyle\": {\"style_classname\": \"transcript\", \"style_label\": \"product,name,id\", \"style_description\": \"ec32_ortholog_description,note,description\", \"style_height\": \"10px\", \"max_height\": \"600\"}, \"jbcolor_scale\": {\"color_score\": {\"color_score_select\": \"none\", \"__current_case__\": 0, \"color\": {\"color_select\": \"automatic\", \"__current_case__\": 0}}}, \"jb_custom_config\": {\"option\": []}, \"jbmenu\": {\"track_menu\": [{\"__index__\": 0, \"menu_action\": \"iframeDialog\", \"menu_label\": \"View transcript report\", \"menu_title\": \"Transcript {id}\", \"menu_url\": \"__MENU_URL_ORG2__\", \"menu_icon\": \"dijitIconBookmark\"}]}, \"track_visibility\": \"default_on\", \"override_apollo_plugins\": \"False\", \"override_apollo_drag\": \"False\"}}]}], \"uglyTestingHack\": \"\", \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "1.16.11+galaxy0",
+            "type": "tool",
+            "uuid": "c6700a8e-79a1-4057-bb9b-8a858d5ef9f0",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "cc4407f9-24f8-4948-acb6-442dca380217"
+                }
+            ]
+        },
+        "9": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_gff/feature_load_gff/2.3.4+galaxy0",
+            "errors": null,
+            "id": 9,
+            "input_connections": {
+                "fasta": {
+                    "id": 2,
+                    "output_name": "output"
+                },
+                "gff": {
+                    "id": 1,
+                    "output_name": "output"
+                },
+                "wait_for": {
+                    "id": 6,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "analysis_id"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "fasta"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "gff"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "organism"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "wait_for"
+                }
+            ],
+            "label": "Chado load gff org1",
+            "name": "Chado load gff",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 324.3000030517578,
+                "height": 194.8000030517578,
+                "left": 858,
+                "right": 1058,
+                "top": 129.5,
+                "width": 200,
+                "x": 858,
+                "y": 129.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_gff/feature_load_gff/2.3.4+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "e9a6d7568817",
+                "name": "chado_feature_load_gff",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"add_only\": \"false\", \"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"fasta\": {\"__class__\": \"RuntimeValue\"}, \"gff\": {\"__class__\": \"RuntimeValue\"}, \"landmark_type\": \"contig\", \"no_seq_compute\": \"false\", \"organism\": {\"__class__\": \"RuntimeValue\"}, \"prot_naming\": {\"method\": \"regex\", \"__current_case__\": 1, \"re_protein_capture\": \"^mRNA(_.+)$\", \"re_protein\": \"prot\\\\1\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"wait_for\": {\"__class__\": \"RuntimeValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "2.3.4+galaxy0",
+            "type": "tool",
+            "uuid": "c263696c-1b67-4d55-9f31-b21e1af85503",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "2381671d-4aac-434f-9888-d8c3865632e2"
+                }
+            ]
+        },
+        "10": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/jbrowse_to_container/jbrowse_to_container/0.5.1",
+            "errors": null,
+            "id": 10,
+            "input_connections": {
+                "organisms_0|jbrowse": {
+                    "id": 7,
+                    "output_name": "output"
+                },
+                "organisms_1|jbrowse": {
+                    "id": 8,
+                    "output_name": "output"
+                }
+            },
+            "inputs": [],
+            "label": null,
+            "name": "Add organisms to JBrowse container",
+            "outputs": [
+                {
+                    "name": "output",
+                    "type": "html"
+                }
+            ],
+            "position": {
+                "bottom": 982.3000030517578,
+                "height": 184.8000030517578,
+                "left": 954,
+                "right": 1154,
+                "top": 797.5,
+                "width": 200,
+                "x": 954,
+                "y": 797.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/jbrowse_to_container/jbrowse_to_container/0.5.1",
+            "tool_shed_repository": {
+                "changeset_revision": "11033bdad2ca",
+                "name": "jbrowse_to_container",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"organisms\": [{\"__index__\": 0, \"jbrowse\": {\"__class__\": \"ConnectedValue\"}, \"name\": \"Undaria pinnatifida Un1f_bizeux female\", \"advanced\": {\"unique_id\": \"undaria_pinnatifida_un1f_bizeux_female\"}}, {\"__index__\": 1, \"jbrowse\": {\"__class__\": \"ConnectedValue\"}, \"name\": \"Undaria pinnatifida Un1m_bizeux male\", \"advanced\": {\"unique_id\": \"undaria_pinnatifida_un1m_bizeux_male\"}}], \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "0.5.1",
+            "type": "tool",
+            "uuid": "e62fe493-26d5-4412-9197-538356185143",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "output",
+                    "uuid": "d2d0f7cf-a133-4869-8b15-1034a3fd89a3"
+                }
+            ]
+        },
+        "11": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_feature_sync/feature_sync/3.2.1.0",
+            "errors": null,
+            "id": 11,
+            "input_connections": {
+                "wait_for": {
+                    "id": 9,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Synchronize features",
+                    "name": "organism_id"
+                }
+            ],
+            "label": "Synchronize features org1",
+            "name": "Synchronize features",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 286.8999938964844,
+                "height": 154.39999389648438,
+                "left": 1140,
+                "right": 1340,
+                "top": 132.5,
+                "width": 200,
+                "x": 1140,
+                "y": 132.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_feature_sync/feature_sync/3.2.1.0",
+            "tool_shed_repository": {
+                "changeset_revision": "64e36c3f0dd6",
+                "name": "tripal_feature_sync",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"organism_id\": {\"__class__\": \"RuntimeValue\"}, \"repeat_ids\": [], \"repeat_types\": [{\"__index__\": 0, \"types\": \"mRNA\"}, {\"__index__\": 1, \"types\": \"polypeptide\"}], \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "3.2.1.0",
+            "type": "tool",
+            "uuid": "1f416c08-227c-4a78-b87b-7a46de0151f2",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "5c958e8e-a89c-4e36-98ec-deeae72b785b"
+                }
+            ]
+        },
+        "12": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+            "errors": null,
+            "id": 12,
+            "input_connections": {
+                "fasta": {
+                    "id": 3,
+                    "output_name": "output"
+                },
+                "wait_for": {
+                    "id": 11,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "analysis_id"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load fasta",
+                    "name": "organism"
+                }
+            ],
+            "label": "Chado load fasta org2",
+            "name": "Chado load fasta",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "json"
+                }
+            ],
+            "position": {
+                "bottom": 548.8999938964844,
+                "height": 164.39999389648438,
+                "left": 585,
+                "right": 785,
+                "top": 384.5,
+                "width": 200,
+                "x": 585,
+                "y": 384.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_fasta/feature_load_fasta/2.3.4+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "ba4d07fbaf47",
+                "name": "chado_feature_load_fasta",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"do_update\": \"false\", \"ext_db\": {\"db\": \"\", \"re_db_accession\": \"\"}, \"fasta\": {\"__class__\": \"ConnectedValue\"}, \"match_on_name\": \"false\", \"organism\": {\"__class__\": \"RuntimeValue\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"re_name\": \"\", \"re_uniquename\": \"\", \"relationships\": {\"rel_type\": \"none\", \"__current_case__\": 0}, \"sequence_type\": \"contig\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "2.3.4+galaxy0",
+            "type": "tool",
+            "uuid": "2baa32f1-0a6d-4e72-a4b5-f268d9592fa0",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "b3d84509-cc26-4cbb-9a16-eb20db79b980"
+                }
+            ]
+        },
+        "13": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_gff/feature_load_gff/2.3.4+galaxy0",
+            "errors": null,
+            "id": 13,
+            "input_connections": {
+                "fasta": {
+                    "id": 5,
+                    "output_name": "output"
+                },
+                "gff": {
+                    "id": 4,
+                    "output_name": "output"
+                },
+                "wait_for": {
+                    "id": 12,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "analysis_id"
+                },
+                {
+                    "description": "runtime parameter for tool Chado load gff",
+                    "name": "organism"
+                }
+            ],
+            "label": "Chado load gff org2",
+            "name": "Chado load gff",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 581.3000030517578,
+                "height": 194.8000030517578,
+                "left": 870,
+                "right": 1070,
+                "top": 386.5,
+                "width": 200,
+                "x": 870,
+                "y": 386.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/chado_feature_load_gff/feature_load_gff/2.3.4+galaxy0",
+            "tool_shed_repository": {
+                "changeset_revision": "e9a6d7568817",
+                "name": "chado_feature_load_gff",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"add_only\": \"false\", \"analysis_id\": {\"__class__\": \"RuntimeValue\"}, \"fasta\": {\"__class__\": \"ConnectedValue\"}, \"gff\": {\"__class__\": \"ConnectedValue\"}, \"landmark_type\": \"contig\", \"no_seq_compute\": \"false\", \"organism\": {\"__class__\": \"RuntimeValue\"}, \"prot_naming\": {\"method\": \"regex\", \"__current_case__\": 1, \"re_protein_capture\": \"^mRNA(_.+)$\", \"re_protein\": \"prot\\\\1\"}, \"psql_target\": {\"method\": \"remote\", \"__current_case__\": 0}, \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "2.3.4+galaxy0",
+            "type": "tool",
+            "uuid": "164b0876-f86d-4484-9d07-5095d5ccb0bc",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "efa2f1f5-ca4f-4cf7-8c33-1856fb19a5eb"
+                }
+            ]
+        },
+        "14": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_feature_sync/feature_sync/3.2.1.0",
+            "errors": null,
+            "id": 14,
+            "input_connections": {
+                "wait_for": {
+                    "id": 13,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [
+                {
+                    "description": "runtime parameter for tool Synchronize features",
+                    "name": "organism_id"
+                }
+            ],
+            "label": "Synchronize features org2",
+            "name": "Synchronize features",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 546.8999938964844,
+                "height": 154.39999389648438,
+                "left": 1149,
+                "right": 1349,
+                "top": 392.5,
+                "width": 200,
+                "x": 1149,
+                "y": 392.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_feature_sync/feature_sync/3.2.1.0",
+            "tool_shed_repository": {
+                "changeset_revision": "64e36c3f0dd6",
+                "name": "tripal_feature_sync",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"organism_id\": {\"__class__\": \"RuntimeValue\"}, \"repeat_ids\": [], \"repeat_types\": [{\"__index__\": 0, \"types\": \"mRNA\"}, {\"__index__\": 1, \"types\": \"polypeptide\"}], \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "3.2.1.0",
+            "type": "tool",
+            "uuid": "faf70f30-9b78-4c62-a683-72646eebf54c",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "f3f6c32c-f490-46a9-b5ee-03ffdae9298f"
+                }
+            ]
+        },
+        "15": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
+            "errors": null,
+            "id": 15,
+            "input_connections": {
+                "wait_for": {
+                    "id": 14,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [],
+            "label": null,
+            "name": "Populate materialized views",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 442.8999938964844,
+                "height": 154.39999389648438,
+                "left": 1433,
+                "right": 1633,
+                "top": 288.5,
+                "width": 200,
+                "x": 1433,
+                "y": 288.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_populate_mviews/db_populate_mviews/3.2.1.0",
+            "tool_shed_repository": {
+                "changeset_revision": "3c08f32a3dc1",
+                "name": "tripal_db_populate_mviews",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"mview\": \"\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "3.2.1.0",
+            "type": "tool",
+            "uuid": "1235ccd7-7507-4ec7-ac92-1871a555367b",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "d853efc1-3b8e-47c2-bff4-052a35733dc6"
+                }
+            ]
+        },
+        "16": {
+            "annotation": "",
+            "content_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
+            "errors": null,
+            "id": 16,
+            "input_connections": {
+                "wait_for": {
+                    "id": 15,
+                    "output_name": "results"
+                }
+            },
+            "inputs": [],
+            "label": null,
+            "name": "Index Tripal data",
+            "outputs": [
+                {
+                    "name": "results",
+                    "type": "txt"
+                }
+            ],
+            "position": {
+                "bottom": 403.1000061035156,
+                "height": 113.60000610351562,
+                "left": 1709,
+                "right": 1909,
+                "top": 289.5,
+                "width": 200,
+                "x": 1709,
+                "y": 289.5
+            },
+            "post_job_actions": {},
+            "tool_id": "toolshed.g2.bx.psu.edu/repos/gga/tripal_db_index/db_index/3.2.1.1",
+            "tool_shed_repository": {
+                "changeset_revision": "d55a39f12dda",
+                "name": "tripal_db_index",
+                "owner": "gga",
+                "tool_shed": "toolshed.g2.bx.psu.edu"
+            },
+            "tool_state": "{\"expose\": {\"do_expose\": \"no\", \"__current_case__\": 0}, \"queues\": \"10\", \"table\": {\"mode\": \"website\", \"__current_case__\": 0}, \"tokenizer\": \"standard\", \"wait_for\": {\"__class__\": \"ConnectedValue\"}, \"__page__\": null, \"__rerun_remap_job_id__\": null}",
+            "tool_version": "3.2.1.1",
+            "type": "tool",
+            "uuid": "5f521ad7-3f0b-417e-9c25-5dc835caf6b2",
+            "workflow_outputs": [
+                {
+                    "label": null,
+                    "output_name": "results",
+                    "uuid": "e83e6184-8f77-416e-9d07-d831d48f5279"
+                }
+            ]
+        }
+    },
+    "tags": [],
+    "uuid": "87110692-c234-47f2-a26b-fda6dcac4f7a",
+    "version": 3
+}
\ No newline at end of file