#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
gga_get_data.py

Usage: $ python3 gga_get_data.py input_example.yml --config config.yml [OPTIONS]
"""

import argparse
import os
import logging
import sys
import shutil

import utilities
import speciesData


class GetData(speciesData.SpeciesData):
    """
    Child of SpeciesData

    Contains methods and attributes to copy data into the src_data subfolders of an organism
    """

    def goto_species_dir(self):
        """
        Go to the species directory (starting from the main dir)

        :return:
        """

        os.chdir(self.main_dir)
        species_dir = os.path.join(self.main_dir, self.genus_species) + "/"
        try:
            os.chdir(species_dir)
        except OSError:
            logging.critical("Cannot access %s" % species_dir)
            sys.exit(1)
        return 1

    def get_source_data_files_from_path(self):
        """
        Find source data files and copy them into the src_data dir tree

        :return:
        """

        try:
            os.chdir(self.species_dir)
        except OSError:
            logging.critical("Cannot access %s" % self.species_dir)
            sys.exit(1)

        organism_annotation_dir = os.path.abspath("./src_data/annotation/{0}/OGS{1}".format(self.species_folder_name, self.genome_version))
        organism_genome_dir = os.path.abspath("./src_data/genome/{0}/v{1}".format(self.species_folder_name, self.genome_version))

        datasets_to_get = {"genome_path": self.genome_path,
                           "gff_path": self.gff_path,
                           "transcripts_path": self.transcripts_path,
                           "proteins_path": self.proteins_path,
                           "interpro_path": self.interpro_path,
                           "orthofinder_path": self.orthofinder_path,
                           "blastp_path": self.blastp_path,
                           "blastx_path": self.blastx_path}

        genome_datasets = ["genome_path"]
        annotation_datasets = ["gff_path", "transcripts_path", "proteins_path", "orthofinder_path",
                               "interpro_path", "blastp_path", "blastx_path"]
        # Where to store blast results?
        # search_excluded_datasets = ["interpro_path", "orthofinder_path", "blastp_path", "blastx_path"]
        # # These datasets will not be searched if missing in the input file

        # Copy each dataset into the correct folder of the organism src_data dir tree
        for k, v in datasets_to_get.items():
            if k in genome_datasets:
                logging.info("Copying {0} into {1}".format(v, organism_genome_dir))
                try:
                    shutil.copyfile(os.path.abspath(v), os.path.join(organism_genome_dir, os.path.basename(v)))
                except Exception as exc:
                    logging.warning("Could not copy {0} ({1})".format(v, exc))
            elif k in annotation_datasets:
                logging.info("Copying {0} into {1}".format(v, organism_annotation_dir))
                try:
                    shutil.copyfile(os.path.abspath(v), os.path.join(organism_annotation_dir, os.path.basename(v)))
                except Exception as exc:
                    logging.warning("Could not copy {0} ({1})".format(v, exc))
            else:
                pass

        os.chdir(self.main_dir)
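

# Expected on-disk layout for the copy targets used above (a sketch inferred from
# the paths built in get_source_data_files_from_path(); this script does not create
# these directories, they are presumably set up earlier in the GGA workflow):
#
#   <main_dir>/<genus_species>/src_data/
#       genome/<species_folder_name>/v<genome_version>/        <- genome fasta
#       annotation/<species_folder_name>/OGS<genome_version>/  <- gff, transcripts, proteins,
#                                                                  interpro, orthofinder,
#                                                                  blastp and blastx outputs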

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Automatic data loading in containers and interaction "
                                                 "with Galaxy instances for GGA, following the protocol @ "
                                                 "http://gitlab.sb-roscoff.fr/abims/e-infra/gga")
    parser.add_argument("input",
                        type=str,
                        help="Input file (yml)")
    parser.add_argument("-v", "--verbose",
                        help="Increase output verbosity",
                        action="store_true")
    parser.add_argument("--config",
                        type=str,
                        help="Config path, defaults to the 'config' file inside the script repository")
    parser.add_argument("--main-directory",
                        type=str,
                        help="Where the stack containers will be located, defaults to working directory")

    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger("urllib3").setLevel(logging.WARNING)

    # Parse the config file if provided, use the default config otherwise
    if not args.config:
        args.config = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "config")
    else:
        args.config = os.path.abspath(args.config)

    if not args.main_directory:
        args.main_directory = os.getcwd()
    else:
        args.main_directory = os.path.abspath(args.main_directory)
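
    # The per-species entries returned by utilities.parse_input() must allow
    # speciesData.SpeciesData to populate the attributes read above: the dataset
    # paths (genome_path, gff_path, transcripts_path, proteins_path, interpro_path,
    # orthofinder_path, blastp_path, blastx_path) and the naming/version fields
    # (genus_species, species_folder_name, full_name, genome_version). The exact
    # yml schema is defined by those modules, not here; see the input_example.yml
    # referenced in the usage line above.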

    sp_dict_list = utilities.parse_input(args.input)

    for sp_dict in sp_dict_list:

        # Create a GetData instance for the current species
        get_data_for_current_species = GetData(parameters_dictionary=sp_dict)

        # Starting
        logging.info("gga_get_data.py called for %s" % get_data_for_current_species.full_name)

        # Set some of the instance attributes
        get_data_for_current_species.main_dir = args.main_directory
        get_data_for_current_species.species_dir = os.path.join(get_data_for_current_species.main_dir,
                                                                get_data_for_current_species.genus_species + "/")

        # Parse the config yaml file
        get_data_for_current_species.config = utilities.parse_config(args.config)

        # Change serexec permissions in repo
        try:
            os.chmod("%s/serexec" % get_data_for_current_species.script_dir, 0o0777)
        except PermissionError:
            logging.critical("Cannot access %s, exiting" % get_data_for_current_species.script_dir)
            sys.exit(1)

        # Retrieve datasets
        logging.info("Finding and copying datasets for %s" % get_data_for_current_species.full_name)
        get_data_for_current_species.get_source_data_files_from_path()
        logging.info("Successfully copied datasets for %s" % get_data_for_current_species.full_name)

        # Format fasta headers (proteins)
        # logging.info("Formatting fasta files headers %s " % get_data_for_current_species.full_name)
        # get_data_for_current_species.batch_modify_fasta_headers()
        # logging.info("Successfully formatted files headers %s " % get_data_for_current_species.full_name)

        logging.info("Data successfully copied for %s" % get_data_for_current_species.full_name)