 Code/configure.py         | 113 +-
 Code/count_runs.py        |  27 +
 Code/getTF.py             |  19 +
 Code/mergeTPM.py          |  96 +
 Code/merge_edges.py       |   4 +
 Code/requirements.txt     |   6 +
 Code/slice_TPM_to_JSON.py |   4 +-
 7 files changed, 211 insertions(+), 58 deletions(-)
diff --git a/Code/configure.py b/Code/configure.py
index 256cf73..148bcd5 100644
--- a/Code/configure.py
+++ b/Code/configure.py
@@ -1,56 +1,57 @@
-# From get_TPM_by_salmon.py
-SALMON          = '/home/lanhui/brain/Salmon/Salmon-0.7.2_linux_x86_64/bin/salmon' # salmon software path
-SALMON_INDEX    = '/home/lanhui/brain/Salmon/salmon_index'
-TRANSCRIPTOME   = '/home/lanhui/brain/Salmon/Arabidopsis_thaliana.TAIR10.cdna.all.fa'
-SALMON_MAP_RESULT_DIR = '../Data/temp/salmon_map_result'
-KMER            = 31
-
-# From download_and_map.py
-DAILY_MAP_NUMBER = 10   # download this many samples each time.  I have tested the values of 3, 4, 5, 8, 9.
-MIN_FASTQ_FILE_SIZE = 200000000    # in bytes, approximately 200MB
-RNA_SEQ_INFO_FILE = '../Data/information/rnaseq_info_database.json'  # some data downloaded from ENA are not RNA-seq (they are ChIP-seq). Use this file to tell whether the file is RNA-seq
-DOWNLOADED_SRA_ID_LOG_FILE = '../Data/log/download_log.txt' # a list of downloaded SRA IDs
-IGNORED_SRA_ID_LOG_FILE = '../Data/log/download_log_small_sized_ids.txt'  # store SRA IDs with small file size.
-MAPPED_RDATA_DIR = '../Data/R/Mapped/public'          # mapped RNA-seq (file names ended with _quant.txt) go here
-RAW_RDATA_DIR    = '/disk1/Data/R/Raw'                # downloaded files go here, was "../Data/R/Raw" (now almost full).
-
-# From update_network.py
-# Don't change the following paths and names
-HISTORY_DIR       = '../Data/history/edges/many_targets' # each edge file contains edges for many targets
-HISTORY_DIR2      = '../Data/history/edges/one_target'   # edges.txt.* files are here, all edge files have the name edges.txt.*, the leading string 'edges.txt' must be present.
-TIMESTAMP_FILE    = '../Data/log/file_timestamp.txt'     # record last modified time of several important files
-SAMPLE_SIZE_FILE  = '../Data/log/total.samples.txt'      # each line contains a date and the number of samples on and after that date
-TEMP_DIR          = '../Data/temp'
-
-PARAMETER_FOR_BUILDCMATRIX = '../Data/parameter/parameter_for_buildCmatrix.txt'
-PARAMETER_FOR_BUILDRMATRIX = '../Data/parameter/parameter_for_buildRmatrix.txt'
-PARAMETER_FOR_NET          = '../Data/parameter/parameter_for_net.txt'
-PARAMETER_FOR_NET_TRAVADB_STRESS      = '../Data/parameter/parameter_for_net_travadb_stress.txt'
-PARAMETER_FOR_NET_TRAVADB_MAP         = '../Data/parameter/parameter_for_net_travadb_map.txt'
-PARAMETER_FOR_NET_MILD_DROUGHT        = '../Data/parameter/parameter_for_net_mild_drought.txt'
-PARAMETER_FOR_NET_WIGGELAB_DIURNAL    = '../Data/parameter/parameter_for_net_wiggelab_diurnal.txt'
-
-BINDING_FILE               = '../Data/history/bind/binding.txt'
-TPM_FILE                   = '../Data/history/expr/TPM.txt' # gene expression data
-
-BUILDRMATRIX_RENEW_INTERVAL = 28 # check every 28 days for updating TPM.txt
-MIN_RNA_SEQ_INCREASE = -999 # minimum RNA-seq experiments needed when updating parameter_for_buildRmatrix.txt
-UPDATE_NETWORK_LOG_FILE  = '../Data/log/update.network.log.txt' # network update log. We should check this file from time to time.
-NEW_OR_UPDATED_CHIP_FILE = '../Data/log/new.or.updated.chip.file.txt'
-
-RNA_SEQ_INFO_DATABASE   = '../Data/information/rnaseq_info_database.txt' # same as RNA_SEQ_INFO_FILE
-RNA_SEQ_INFO_DATABASE_JSON   = '../Data/information/rnaseq_info_database.json'
-
-GENE_ID_FIRST_TWO_LETTERS = 'AT'
-MEMORY_STRENGTH = 365 # memory retention power (larger value means better memory)
-
-#
-MAPPED_CDATA_DIR = '../Data/C/Mapped' # mapped ChIP-seq data
-
-# Used in merge_edges.py
-EDGE_POOL_DIR = '/disk1/edge_pool'
-MERGED_EDGE_FILE = '../Data/temp/edges.txt'
-SQLITE_EDGE_FILE = '../Data/temp/edges.sqlite'
-DIFF_EDGE_FILE = '../Data/temp/edges-diff.txt' # the difference between two edge files from yesterday and from today
-
-TARGET_TF_FILE = '../Data/information/target_tf.txt'
+# From get_TPM_by_salmon.py
+SALMON          = '/home/lanhui/brain/Salmon/Salmon-0.7.2_linux_x86_64/bin/salmon' # salmon software path
+SALMON_INDEX    = '/home/lanhui/brain/Salmon/salmon_index'
+TRANSCRIPTOME   = '/home/lanhui/brain/Salmon/Arabidopsis_thaliana.TAIR10.cdna.all.fa'
+SALMON_MAP_RESULT_DIR = '../Data/temp/salmon_map_result'
+KMER            = 31
+
+# From download_and_map.py
+DAILY_MAP_NUMBER = 10   # download this many samples each time.  I have tested the values of 3, 4, 5, 8.
+MIN_FASTQ_FILE_SIZE = 200000000    # in bytes, approximately 200MB
+RNA_SEQ_INFO_FILE = '../Data/information/rnaseq_info_database.json'  # some data downloaded from ENA are not RNA-seq (they are ChIP-seq). Use this file to tell whether the file is RNA-seq
+DOWNLOADED_SRA_ID_LOG_FILE = '../Data/log/download_log.txt' # a list of downloaded SRA IDs
+IGNORED_SRA_ID_LOG_FILE = '../Data/log/download_log_small_sized_ids.txt'  # store SRA IDs with small file size.
+MAPPED_RDATA_DIR = '../Data/R/Mapped/public'          # mapped RNA-seq (file names ended with _quant.txt) go here
+RAW_RDATA_DIR    = '/disk1/Data/R/Raw'                # downloaded files go here, was "../Data/R/Raw" (now almost full).
+
+# From update_network.py
+# Don't change the following paths and names
+HISTORY_DIR       = '../Data/history/edges/many_targets' # each edge file contains edges for many targets
+HISTORY_DIR2      = '../Data/history/edges/one_target'   # edges.txt.* files are here, all edge files have the name edges.txt.*, the leading string 'edges.txt' must be present.
+TIMESTAMP_FILE    = '../Data/log/file_timestamp.txt'     # record last modified time of several important files
+SAMPLE_SIZE_FILE  = '../Data/log/total.samples.txt'      # each line contains a date and the number of samples on and after that date
+TEMP_DIR          = '../Data/temp'
+
+PARAMETER_FOR_BUILDCMATRIX = '../Data/parameter/parameter_for_buildCmatrix.txt'
+PARAMETER_FOR_BUILDRMATRIX = '../Data/parameter/parameter_for_buildRmatrix.txt'
+PARAMETER_FOR_NET          = '../Data/parameter/parameter_for_net.txt'
+PARAMETER_FOR_NET_TRAVADB_STRESS      = '../Data/parameter/parameter_for_net_travadb_stress.txt'
+PARAMETER_FOR_NET_TRAVADB_MAP         = '../Data/parameter/parameter_for_net_travadb_map.txt'
+PARAMETER_FOR_NET_MILD_DROUGHT        = '../Data/parameter/parameter_for_net_mild_drought.txt'
+PARAMETER_FOR_NET_WIGGELAB_DIURNAL    = '../Data/parameter/parameter_for_net_wiggelab_diurnal.txt'
+
+BINDING_FILE               = '../Data/history/bind/binding.txt'
+TPM_FILE                   = '../Data/history/expr/TPM.txt' # gene expression data
+
+BUILDRMATRIX_RENEW_INTERVAL = 28 # check every 28 days for updating TPM.txt
+MIN_RNA_SEQ_INCREASE = -999 # minimum RNA-seq experiments needed when updating parameter_for_buildRmatrix.txt
+UPDATE_NETWORK_LOG_FILE  = '../Data/log/update.network.log.txt' # network update log. We should check this file from time to time.
+NEW_OR_UPDATED_CHIP_FILE = '../Data/log/new.or.updated.chip.file.txt'
+
+RNA_SEQ_INFO_DATABASE   = '../Data/information/rnaseq_info_database.txt' # same as RNA_SEQ_INFO_FILE
+RNA_SEQ_INFO_DATABASE_JSON   = '../Data/information/rnaseq_info_database.json'
+
+GENE_ID_FIRST_TWO_LETTERS = 'AT'
+MEMORY_STRENGTH = 365 # memory retention power (larger value means better memory)
+
+#
+MAPPED_CDATA_DIR = '../Data/C/Mapped' # mapped ChIP-seq data
+
+# Used in merge_edges.py
+EDGE_POOL_DIR = '../Data/history/edge_pool'
+MERGED_EDGE_FILE = '../Data/temp/edges.txt'
+SQLITE_EDGE_FILE = '../Data/temp/edges.sqlite'
+DIFF_EDGE_FILE = '../Data/temp/edges-diff.txt' # the difference between two edge files from yesterday and from today
+
+TARGET_TF_FILE = '../Data/information/target_tf.txt'
+>>>>>>> 837a291e6a1816920c7116410dd1e0df9fd3eaf7
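Note: configure.py is the central constants module the other pipeline scripts import (merge_edges.py below, for example, pulls EDGE_POOL_DIR from it). Since this commit relocates EDGE_POOL_DIR from /disk1/edge_pool to ../Data/history/edge_pool, a quick path check before a run can catch a stale path. The following helper is hypothetical, not part of the repository, and assumes it is executed from the Code/ directory like the other scripts:

    # check_paths.py (hypothetical): verify that configured directories exist
    import os
    from configure import EDGE_POOL_DIR, TEMP_DIR, MAPPED_RDATA_DIR

    for path in (EDGE_POOL_DIR, TEMP_DIR, MAPPED_RDATA_DIR):
        status = 'ok' if os.path.isdir(path) else 'MISSING'
        print('%-40s %s' % (path, status))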
diff --git a/Code/count_runs.py b/Code/count_runs.py
new file mode 100644
index 0000000..c254c31
--- /dev/null
+++ b/Code/count_runs.py
@@ -0,0 +1,27 @@
+# Purpose: count the total number of unique run IDs in all TPM files
+# Usage: python3 count_runs.py
+# 16 Aug 2024, zjnu, hui
+
+import glob, gzip
+
+runs = set()
+
+for filename in glob.glob('../Data/history/expr/TPM*'):
+    print(filename)
+    if filename.endswith('txt'):
+        with open(filename) as f:
+            line = f.readlines()[0]
+            line = line.strip()
+            lst = line.split('\t')
+            for runid in lst[1:]:
+                runs.add(runid)
+    elif filename.endswith('gz'):
+        with gzip.open(filename, 'rt') as f:
+            line = f.readlines()[0]
+            line = line.strip()
+            lst = line.split('\t')
+            for runid in lst[1:]:
+                runs.add(runid)
+
+print(runs)
+print('Total unique run IDs: %d' % len(runs))
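Note: count_runs.py loads each TPM file in full with f.readlines() just to read its header row. A leaner sketch (not in the commit) reads only the first line with next(f), which matters because an assembled TPM matrix can run to gigabytes; the same header-scan pattern recurs in mergeTPM.py below:

    # Sketch only: collect run IDs from TPM headers without loading whole files
    import glob, gzip

    runs = set()
    for filename in glob.glob('../Data/history/expr/TPM*'):
        opener = gzip.open if filename.endswith('gz') else open
        with opener(filename, 'rt') as f:
            header = next(f).strip()       # header line: gene_id <tab> run IDs
            runs.update(header.split('\t')[1:])
    print('Total unique run IDs: %d' % len(runs))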
diff --git a/Code/getTF.py b/Code/getTF.py
new file mode 100644
index 0000000..1090bd2
--- /dev/null
+++ b/Code/getTF.py
@@ -0,0 +1,19 @@
+import json
+
+tfs = set()
+
+with open('../Data/information/target_tf.txt') as f:
+    for line in f:
+        line = line.strip()
+        lst = line.split('\t')
+        tf = lst[1]
+        tfs.add(tf)
+
+with open('../Data/information/target_tf_agris.txt') as f:
+    for line in f:
+        line = line.strip()
+        lst = line.split('\t')
+        tf = lst[1]
+        tfs.add(tf)
+
+print(json.dumps(sorted(list(tfs))))
diff --git a/Code/mergeTPM.py b/Code/mergeTPM.py
new file mode 100644
index 0000000..f37f438
--- /dev/null
+++ b/Code/mergeTPM.py
@@ -0,0 +1,96 @@
+# Purpose: merge all TPM files in PATH_TO_TPM and put the merged file to PATH_TO_TPM/assemble/
+# Usage: python3 mergeTPM.py
+# 17 Aug 2024, zjnu, hui
+
+import os, glob, gzip
+
+PATH_TO_TPM = '../Data/history/expr/'
+
+tpm_files_to_include = glob.glob(os.path.join(PATH_TO_TPM, 'TPM*'))
+#tpm_files_to_include = ['../Data/history/expr/TPM.inhouse.diurnal.txt', '../Data/history/expr/TPM.20190919.txt']
+
+# Get all run IDs
+run_ids = set()
+for filename in tpm_files_to_include:
+    print(filename)
+    if filename.endswith('txt'):
+        with open(filename) as f:
+            line = f.readlines()[0]
+            line = line.strip()
+            lst = line.split('\t')
+            for runid in lst[1:]:
+                run_ids.add(runid)
+    elif filename.endswith('gz'):
+        with gzip.open(filename, 'rt') as f:
+            line = f.readlines()[0]
+            line = line.strip()
+            lst = line.split('\t')
+            for runid in lst[1:]:
+                run_ids.add(runid)
+
+print('Total unique run IDs: %d' % len(run_ids))
+
+# Get gene IDs
+gene_ids = []
+with open(os.path.join(PATH_TO_TPM, 'TPM.txt')) as f:
+    for line in f:
+        line = line.strip()
+        if line.startswith('AT'):
+            lst = line.split('\t')
+            gene_ids.append(lst[0])
+
+assert len(gene_ids) == 33602
+
+
+# Assemble gene expressions
+g = {} # {'run':{'g1':1, 'g2':2}}
+def populate_dictionary(d, lines):
+    line = lines[0].strip()
+    runs = line.split('\t')[1:]
+    for line in lines[1:]:
+        line = line.strip()
+        lst = line.split('\t')
+        gene = lst[0]
+        expression_levels = lst[1:]
+        run_index = 0
+        for x in expression_levels:
+            run = runs[run_index]
+            if not run in d:
+                d[run] = {}
+            d[run][gene] = x
+            run_index += 1
+
+
+for filename in tpm_files_to_include:
+    print('Assemble ' + filename)
+    if filename.endswith('txt'):
+        with open(filename) as f:
+            lines = f.readlines()
+            populate_dictionary(g, lines)
+    elif filename.endswith('gz'):
+        with gzip.open(filename, 'rt') as f:
+            lines = f.readlines()
+            populate_dictionary(g, lines)
+
+
+# Write to TPM.assemble.number.txt
+run_ids_sorted = sorted(list(run_ids))
+assemble_dir = os.path.join(PATH_TO_TPM, 'assemble')
+if not os.path.exists(assemble_dir):
+    os.mkdir(assemble_dir)
+fname = os.path.join(assemble_dir, 'assemble.%d.txt' % (len(run_ids)))
+print(f'Write to {fname}')
+with open(fname, 'w') as f:
+    # write first line
+    lst1 = ['gene_id'] + run_ids_sorted
+    f.write('\t'.join(lst1) + '\n')
+    for gene in gene_ids:
+        lst2 = [gene]
+        for run in run_ids_sorted:
+            if gene in g[run]:
+                lst2.append(g[run][gene])
+            else:
+                lst2.append('NA')
+        f.write('\t'.join(lst2) + '\n')
+        assert len(lst1) == len(lst2)
+
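Note: mergeTPM.py assembles the union of runs and genes by hand through a nested run -> gene -> value dictionary and fills gaps with 'NA'. For comparison, the same outer join can be written with pandas. This is purely illustrative, since pandas is not in Code/requirements.txt, and unlike mergeTPM.py it would keep duplicate run columns instead of deduplicating them:

    # Hypothetical pandas equivalent of the assemble step (illustration only)
    import glob, os
    import pandas as pd

    frames = [pd.read_csv(f, sep='\t', index_col=0)   # .gz files decompress automatically
              for f in glob.glob('../Data/history/expr/TPM*')]
    merged = pd.concat(frames, axis=1, join='outer')  # union of genes and runs; gaps become NaN
    os.makedirs('../Data/history/expr/assemble', exist_ok=True)
    merged.to_csv('../Data/history/expr/assemble/assemble.%d.txt' % merged.shape[1],
                  sep='\t', na_rep='NA')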
diff --git a/Code/merge_edges.py b/Code/merge_edges.py
index 6bbd2f0..872faa9 100644
--- a/Code/merge_edges.py
+++ b/Code/merge_edges.py
@@ -23,6 +23,7 @@
 import os, operator, sys, math, datetime, glob
 from log import write_log_file
 from configure import EDGE_POOL_DIR, MERGED_EDGE_FILE, SQLITE_EDGE_FILE, UPDATE_NETWORK_LOG_FILE
+from utils import make_paths
 import sqlite3
 
 def get_number_of_RNAseq_ids(s):
@@ -134,6 +135,9 @@ def make_new_edge(d):
 
 ##main
+
+make_paths(EDGE_POOL_DIR)
+
 write_log_file('[merge_edges.py] Go through all edge files in the edge pool %s.' % (EDGE_POOL_DIR) , UPDATE_NETWORK_LOG_FILE)
 d = {} # d will contain all edges computed so far, where the key is TargetGeneID_TFGeneID, and the value is a list of tuples.  Each tuple is a historical edge.
 file_count = 0
diff --git a/Code/requirements.txt b/Code/requirements.txt
new file mode 100644
index 0000000..bd9cf96
--- /dev/null
+++ b/Code/requirements.txt
@@ -0,0 +1,6 @@
+# install command: pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+networkx
+flask
+pylablib
+pyBigWig
+scipy
diff --git a/Code/slice_TPM_to_JSON.py b/Code/slice_TPM_to_JSON.py
index e597b78..7509f00 100644
--- a/Code/slice_TPM_to_JSON.py
+++ b/Code/slice_TPM_to_JSON.py
@@ -127,7 +127,7 @@ def make_json_file(expr_dict, dir_name, glb_param_dict):
 def make_json_file_using_r(dir_name, glb_param_dict): # use r script to make it faster
     r_code = '''
-	library(rjson)
+	library(jsonlite)
 	dir.name <- '%s'
 	tpm.file <- '%s'
 	take.log <- '%s'
@@ -141,7 +141,7 @@ def make_json_file_using_r(dir_name, glb_param_dict): # use r script to make it
 	    dir.create(dir.name)
 	}
 	for (i in 1:dim(X)[1]) {
-	    y <- toJSON(X[i,])
+	    y <- toJSON(unbox(X[i,]), digits=I(3), pretty=TRUE)
 	    file.name = paste(dir.name, paste(gene.id[i], 'json', sep='.'), sep='/')
 	    cat(y, file=file.name)
 	}
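Note: the slice_TPM_to_JSON.py change swaps the embedded R code's JSON backend from rjson to jsonlite; with unbox() each gene row serializes as a single {run: TPM} object, and digits=I(3) caps the numeric precision at three decimal digits. A rough Python mirror of that per-gene slicing, shown only to make the output shape concrete (the commit's actual implementation is the R code above):

    # Sketch only: write one <gene>.json file per TPM row, {run: value}
    import json, os

    def slice_tpm_to_json(tpm_file, dir_name):
        os.makedirs(dir_name, exist_ok=True)
        with open(tpm_file) as f:
            runs = next(f).strip().split('\t')[1:]
            for line in f:
                lst = line.strip().split('\t')
                gene, values = lst[0], lst[1:]
                # round to 3 decimals, mirroring toJSON(..., digits=I(3))
                d = {r: (round(float(v), 3) if v != 'NA' else None)
                     for r, v in zip(runs, values)}
                with open(os.path.join(dir_name, gene + '.json'), 'w') as out:
                    json.dump(d, out)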
