# Usage: python html_network.py -f edges.txt -r parameter_for_buildRmatrix.txt -c parameter_for_buildCmatrix.txt -n parameter_for_net.txt
# Purpose: make a summary.html plus its associated files (stored in folder edges) given an edge file (edges.txt). These files are served as static files online. The total volume of these static files can be quite large, as we get one file for each edge.
#
# This program is used in update_network.py.
#
# Created on 26 Feb 2017, SLCU, Hui
# Last modified 24 Mar 2017, SLCU, Hui
# Last modified 21 Apr 2017, SLCU, Hui [w2ui for regulatee and regulator tables]
# Last modified 19 Jun 2017, SLCU, Hui [changed text_to_dict to fit the updated RNA_SEQ_INFO_DATABASE]
# Last modified 29 Jun 2017, SLCU, Hui [added key 'sample_id' in text_to_dict]
# Last reviewed 01 Feb 2019, Hui [code review]
import sys, os
import networkx as nx # On macOS you may need: export PYTHONPATH="/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages"
import numpy as np
from optparse import OptionParser
from itertools import islice
import operator
from datetime import datetime
import collections, re, glob, json
from geneid2name import make_gene_name_AGI_map_dict
from param4net import make_global_param_dict
## Global variables
REGENERATE_ALL_EDGE_FILES = 'YES'
INDEX_PAGE = '../Webapp/static/summary.html' # change
DIR_NAME = '../Webapp/static/edges' # change
RNA_SEQ_INFO_DATABASE = '../Data/information/rnaseq_info_database.txt'
RNA_SEQ_INFO_DATABASE_JSON = '../Data/information/rnaseq_info_database.json'
RNA_SEQ_INFO_HTML_PAGE = 'rnaseqinfo.html'
GENE_ID_TO_GENE_NAME = '../Data/information/AGI-to-gene-names_v2.txt'
CHIP_SEQ_INFO_HTML_PAGE = 'chipseqinfo.html'
RAKE_STOPLIST_FILE = '../Data/information/SmartStoplist.txt'
JSON_DIR = '../Data/history/expr/json' # move this directory to the same place as this file html_network.py, for gene expression scatterplot
JSON_DIR2 = '../Data/history/bind/json2' # for displaying binding plots
C3_DIR = './depend/c3'
W2UI_DIR = './depend/w2ui'
C3_FILES = ['c3.min.css', 'c3.min.js', 'd3.min.js', 'scatterplot.js', 'barchart.js'] # for displaying scatterplots and binding strength
W2UI_FILES = ['jquery.min.for.w2ui.js', 'w2ui.min.js', 'w2ui.min.css']
ALPHA = 0.6 # weight indicating the importance of number of RNA-seq experiments
## function definitions
### RAKE: Rapid Automatic Keyword Extraction (NOT USED). Skip it and jump to "my functions" below.
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
    @param text The text that must be split in to words.
    @param min_word_return_size The minimum number of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
#leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = r'\b' + word + r'(?![\w-])' # added look ahead for hyphen
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "":
phrase_list.append(phrase)
return phrase_list
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
#if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree #orig.
#word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores = deg(w)/freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) #orig.
#word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
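# Worked example (illustrative): for phraseList = ['deep learning', 'learning'],
# word_frequency = {'deep': 1, 'learning': 2}; the degrees after adding the
# frequencies are {'deep': 2, 'learning': 3}; so word_score = {'deep': 2/1 = 2.0,
# 'learning': 3/2 = 1.5}.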
def generate_candidate_keyword_scores(phrase_list, word_score):
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path):
self.stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores)
        sorted_keywords = sorted(keyword_candidates.items(), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
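# Minimal usage sketch (RAKE is NOT USED in the main flow; the scores shown are
# illustrative only):
#   rake = Rake(RAKE_STOPLIST_FILE)
#   keywords = rake.run('systems of linear constraints over natural numbers')
#   # -> e.g. [('linear constraints', 4.0), ('natural numbers', 4.0), ...]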
### my functions
def get_id(s):
lst = s.split(' ')
return lst[0]
def get_name(s, agi2name_dict):
s = s.strip()
if s == '':
return '???'
if s in agi2name_dict:
name = agi2name_dict[s]
lst = name.split(';')
return lst[0]
else:
return s
def show_path(G, lst, options):
s = ''
n = len(lst)
count = 0
for i in range(n-1):
u = lst[i]
v = lst[i+1]
e = G.get_edge_data(u, v)
padding = ''
if e['weight'] > 0:
s += padding + '%s\t(%s,%2.2f)\t-> ' % (u, e['color'], e['weight']) + ('[%s]\n' % (e['condition']) if options.cond==True else '\n')
else:
s += padding + '%s\t(%s,%2.2f)\t|| ' % (u, e['color'], e['weight']) + ('[%s]\n' % (e['condition']) if options.cond==True else '\n')
count += 4
print(s + v)
print('')
def k_shortest_paths(G, source, target, k, weight=None):
return list(islice(nx.shortest_simple_paths(G, source, target, weight=weight), k))
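# Usage sketch (illustrative; note that nx.shortest_simple_paths does not accept
# multigraphs, so convert the MultiDiGraph first):
#   G = nx.DiGraph(build_network_from_file('edges.txt'))
#   for path in k_shortest_paths(G, 'AT1G00100', 'AT5G00100', 3): # placeholder gene IDs
#       print(' -> '.join(path))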
def not_bad_line(s):
if s.strip() == '':
return False
if 'WARNING' in s:
return False
if 'number' in s:
return False
if 'Need' in s:
return False
if 'Error' in s:
return False
if 'Too' in s:
return False
if not s.startswith('AT'): # need modification for other organisms
return False
return True
def build_network_from_file(fname):
''' build the network from the big edge file, edges.txt. '''
MG = nx.MultiDiGraph(max_rsubset_size=1400) # maximum size of conditionR list
max_rsize = 0
f = open(fname)
cond_list = []
for line in f:
line = line.strip()
if not_bad_line(line):
lst = line.split('\t')
g1 = lst[0].split()[0] # target gene id
g2 = lst[1].split()[0] # source gene id
MG.add_node(g1)
MG.add_node(g2)
edge_type = lst[3] # all or mix
condR_lst = []
condC_lst = []
model_fit_measure = '?'
if len(lst) > 6:
condR = lst[4]
condR_lst = lst[4].split()
condC = lst[5]
condC_lst = lst[5].split()
model_fit_measure = lst[6]
if model_fit_measure == '.' and edge_type == 'mix':
model_fit_measure = '-1000.0' # RNA-seq samples were selected using post.translation.3. Search '-1000.0' in QUICKSTART.html for more detail.
if '=' in model_fit_measure: # in early days, the log likelihood field looks like loglik=-1234.2
model_fit_measure = model_fit_measure.split('=')[1] # remove 'loglik='
size_condR = len(condR_lst)
if size_condR > max_rsize:
max_rsize = size_condR
create_date = '20161201' # default 2016-12-01
if len(lst) > 7: # has date information, date information is the 8th column
create_date = lst[7]
metric = float(lst[8]) # appended by update_network.py
tissue_or_method = lst[9] # appended by update_network.py
score = float(lst[2]) # strength of various kinds of relationship.
            # 'all' and 'mix' edges are added the same way, so handle them together.
            if edge_type in ('all', 'mix'):
                if score > 0:
                    MG.add_edge(g2, g1, action='>', weight=score, metric=metric, conditionR=condR_lst, conditionC=condC_lst, rmse=model_fit_measure, edge_date=create_date, subset=tissue_or_method)
                elif score < 0:
                    MG.add_edge(g2, g1, action='X', weight=score, metric=metric, conditionR=condR_lst, conditionC=condC_lst, rmse=model_fit_measure, edge_date=create_date, subset=tissue_or_method)
f.close()
MG.graph['max_rsubset_size'] = max_rsize
return MG
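# Expected edges.txt layout (tab-separated), as parsed above. An illustrative line:
#   AT1G03080 <TAB> AT2G43790 <TAB> 0.85 <TAB> mix <TAB> SRR1 SRR2 <TAB> C0001 C0002 <TAB> -1234.2 <TAB> 20170419 <TAB> 0.9 <TAB> seedling
# Columns: target gene, source gene (TF), score, edge type ('all' or 'mix'),
# RNA-seq sample IDs, ChIP experiment IDs, log likelihood (or '.'), creation
# date, metric and tissue/method (the last two are appended by update_network.py).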
def get_value(s, delimit):
    ''' Get the value after the first occurrence of the delimiter. '''
    lst = s.split(delimit, 1) # split at the first delimiter only
return lst[1].strip()
def text_to_dict(fname, ignore_first_line=True):
''' fname is RNA_SEQ_INFO_DATABASE (see above). '''
if not os.path.exists(fname):
print('html_network.py: you must provide %s. See parse_ena_xml.py on how to make it.' % (fname))
sys.exit()
d = {}
f = open(fname)
lines = f.readlines()
if ignore_first_line == True:
lines = lines[1:]
f.close()
for line in lines:
line = line.strip()
lst = line.split('\t')
run_id = lst[0]
d[run_id] = {} # run_id is ENA/SRA run id
d[run_id]['experiment_id'] = lst[2]
if len(lst) < 5:
continue
d[run_id]['project_id'] = lst[4]
d[run_id]['sample_id'] = lst[1].split('...')[0]
d[run_id]['description'] = '\t'.join(lst[5:])
return d
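# Illustrative result (the run ID and values are placeholders):
#   d['SRR1548701'] == {'experiment_id': 'SRX999999', 'project_id': 'PRJ999999',
#                       'sample_id': 'SAMN999999', 'description': '...'}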
def get_true_run_id(s):
s = s[2:] # s looks like R0SRR1548701XX, so 2 is the position of 'S'.
index = s.find('X')
if index >= 0: # we don't need X
return s[0:index]
return s
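# Example: get_true_run_id('R0SRR1548701XX') returns 'SRR1548701'.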
def make_rna_seq_info_dict(fname):
db_dict = text_to_dict(RNA_SEQ_INFO_DATABASE)
f = open(fname)
d = {}
for line in f:
line = line.strip()
if line.startswith('@'):
run_id = line[1:] # run_id is sth like R0SRR1548701XX
run_id2 = get_true_run_id(run_id)
if run_id2 in db_dict:
d[run_id] = db_dict[run_id2]
else:
d[run_id] = {'project_id':'#', 'experiment_id':'#', 'sample_id':'#', 'description':'NA'}
f.close()
return d
def make_rna_seq_info_html_page(fname, d):
f = open(fname, 'w')
    f.write('<html><head><title>RNA-seq information</title></head><body>')
    for k in sorted(d.keys()):
        run_link = 'http://www.ebi.ac.uk/ena/data/view/%s' % (get_true_run_id(k))
        s = '<p id="%s"><a href="%s">%s</a> %s</p>\n' % (k, run_link, k, d[k].get('description', 'NA'))
        f.write(s)
    f.write('</body></html>')
f.close()
def make_chip_seq_info_dict(fname):
''' See QUICKSTART.html#parameter-for-buildcmatrix '''
f = open(fname)
d = {}
for line in f:
line = line.strip()
if line.startswith('@'):
experiment_id = line[1:]
d[experiment_id] = {}
if line.startswith('PROTEIN_ID'):
d[experiment_id]['PROTEIN_ID'] = get_value(line, ':')
if line.startswith('PROTEIN_NAME'):
d[experiment_id]['PROTEIN_NAME'] = get_value(line, ':')
if line.startswith('DATA_NAME'):
d[experiment_id]['DATA_NAME'] = get_value(line, ':')
if line.startswith('DESCRIPTION'):
d[experiment_id]['DESCRIPTION'] = get_value(line, ':')
if line.startswith('LOCATION'):
d[experiment_id]['LOCATION'] = get_value(line, ':')
if line.startswith('NOTE'):
d[experiment_id]['NOTE'] = get_value(line, ':')
f.close()
return d
def make_chip_seq_info_html_page(fname, d):
f = open(fname, 'w')
    f.write('<html><head><title>ChIP-seq information</title></head><body>')
    for k in sorted(d.keys()):
        s = '<p id="%s">%s %s %s</p>\n' % (k, k, d[k].get('PROTEIN_NAME', ''), d[k].get('DESCRIPTION', ''))
        f.write(s)
    f.write('</body></html>')
f.close()
def make_link_string_for_cond(s, type):
''' s is a string of RNA-seq IDs or ChIP IDs. '''
lst = s.split()
result = ''
for x in lst:
if type == 'rnaseq':
path = '%s#%s' % (RNA_SEQ_INFO_HTML_PAGE, x)
else:
path = '%s#%s' % (CHIP_SEQ_INFO_HTML_PAGE, x)
        result += '<a href="%s">%s</a> ' % (path, x)
return result
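# Example: make_link_string_for_cond('SRR1548701', 'rnaseq') returns
#   '<a href="rnaseqinfo.html#SRR1548701">SRR1548701</a> '
# linking each ID to its anchor in the info page generated above.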
def get_chip_signal(s, d):
''' extract signal information, and return the words ordered by frequency '''
lst = s.split()
result = ''
for x in lst:
desc = d[x]['DESCRIPTION']
lst2 = desc.split('\t')
for y in lst2:
if y.startswith('SIGNAL='):
result += ';' + y[7:] # 7 means after the '=' in 'SIGNAL='
break
return word_freq(result)
def get_chip_phenotype(s, d):
''' extract phenotype information, and return the words ordered by frequency '''
lst = s.split()
result = ''
for x in lst:
desc = d[x]['DESCRIPTION']
lst2 = desc.split('\t')
for y in lst2:
if y.startswith('PHENOTYPE='):
result += ';' + y[10:] # 10 means after the '=' in 'PHENOTYPE='
break
return word_freq(result)
def word_freq(s): # for ChIP-seq data
''' s is string. return a string of words ordered by frequency '''
if s == '':
return ''
lst = s.split(';')
d = {}
for x in lst:
lst2 = x.split()
for y in lst2:
#k = y.lower()
k = y
k = k.strip(',')
k = k.strip('.')
k = k.strip(')')
k = k.strip('(')
if not k.lower() in ['at', 'in', 'to', 'with', ',', '.', ':', '-']: # exclude these words
if not k in d:
d[k] = 1
else:
d[k] += 1
sorted_tuples = sorted(d.items(), key=operator.itemgetter(1), reverse=True)
first_items = [x[0] for x in sorted_tuples]
return ' '.join(first_items)
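# Example: word_freq('salt stress;salt') returns 'salt stress', because 'salt'
# occurs twice and 'stress' once, and words are ordered by descending frequency.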
def word_freq2(lst): # for RNA-seq data
    ''' lst is a list of description strings. Return a string of words ordered by frequency. '''
if lst == []:
return ''
d = {}
for x in lst: # each description
lst2 = x.split()
for y in lst2: # each word
k = y
            k = k.strip(',') # remove superfluous characters, if any
k = k.strip('.')
k = k.strip(')')
k = k.strip('(')
k = k.strip(';')
            if not k.startswith('SRR') and not k.startswith('ERR') and not k.startswith('DRR') and not k.isdigit() and not ':' in k and len(k) > 1 and not k.lower() in ['just', 'library', 'libraries', 'dna', 'nextseq', 'nextseq500', 'sequencing', 'end', 'al;', 'which', 'analyse', 'analyze', 'analyzer', 'whole-genome', 'thus', 'plant', 'plants', 'future', 'such', 'not', 'alone', 'most', 'within', 'into', 'but', 'between', 'we', 'is', 'or', 'also', 'was', 'can', 'be', 'use', 'kit', 'used', 'et', 'al', 'by', 'this', 'the', 'their', 'at', 'in', 'to', 'on', 'with', ',', '.', ':', '-', 'rna-seq', 'rnaseq', 'of', 'hiseq', 'hiseq2000', 'illumina', 'arabidopsis', 'thaliana', 'from', '[title]', '[description]', 'using', 'were', 'are', 'and', 'under', 'a', 'an', 'one', 'two', 'three', 'as', 'for', 'after', 'none', 'mapping', 'na', 'whole', 'chip-seq', 'paired']: # exclude these strings
if not k in d:
d[k] = 1
else:
d[k] += 1
sorted_tuples = sorted(d.items(), key=operator.itemgetter(1), reverse=True)
first_items = [x[0] + ' (' + str(x[1]) + ')' for x in sorted_tuples]
return ' '.join(first_items)
def word_freq3(lst): # for RNA-seq data, bag-of-words model
''' similar to word_freq2, but may be faster '''
if lst == []:
return ''
bow = [collections.Counter(re.findall(r'\w+', s)) for s in lst] # bag of words
d = sum(bow, collections.Counter()) # frequency of each word
sorted_tuples = d.most_common(len(d))
exclude_lst = ['basis', 'requires', 'population', 'resolution', 'via', 'overall', 'elements', 'grown', 'expression', 'appears', 'total', 'have', 'here', 'of', 'just', 'type', 'transcriptomes', 'transcriptome', 'transcriptomic', 'transcription', 'transcriptional', 'report', 'during', 'diversity', 'investigated', 'library', 'per', 'libraries', '2500', '2000', '1210', '1001', '1107', 'dna', 'nextseq', 'nextseq500', 'seq', 'sequencing', 'sequencing;', 'end', 'al;', 'whereas', 'which', 'analyse', 'analyze', 'analyzer', 'quality', 'analysis', 'analyses', 'whole-genome', 'thus', 'plant', 'plants', 'future', 'such', 'not', 'alone', 'most', 'molecular', 'within', 'into', 'but', 'however', 'between', 'we', 'is', 'origin', 'or', 'also', 'was', 'can', 'be', 'been', 'use', 'kit', 'used', 'et', 'al', 'by', 'this', 'that', 'these', 'the', 'their', 'at', 'in', 'to', 'on', 'with', 'mrna', 'rna', 'rnas', 'rna-seq', 'rnaseq', 'of', 'hiseq', 'hiseq2000', 'illumina', 'arabidopsis', 'thaliana', 'from', 'roles', 'title', 'description', 'using', 'were', 'are', 'and', 'unknown', 'under', 'a', 'an', 'one', 'two', 'three', 'as', 'for', 'found', 'after', 'none', 'mapping', 'na', 'whole', 'chip-seq', 'play', 'paired', 'br', 'future', 'rowan', 'study', 'studies', 'may', 'sample', 'truseq', 'until', 'gene', 'genes', 'genetic', 'genome', 'genomes', 'units', 'its', 'yelina', 'data', 'set', 'tube', 'single-base', 'size', 'room', 'along', 'before', 'several', 'less', 'protocol', 'profiling', 'profiles', 'conditions', 'collection', 'complete', 'reveal', 'given', 'ii', 'isolated', 'described', 'describe', 'na', 'worldwide', 'accessions', 'identify', 'identification'] # exclude these words
first_items = [x[0] + ' (' + str(x[1]) + ')' for x in sorted_tuples if x[1] > 2 and len(x[0]) > 1 and not x[0].startswith('SRR') and not x[0].startswith('ERR') and not x[0].startswith('DRR') and not x[0].isdigit() and not ':' in x[0] and not x[0].lower() in exclude_lst]
return ' '.join(first_items)
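# Example: word_freq3(['root tip', 'root tip', 'root']) returns 'root (3)';
# 'tip' (count 2) is dropped because only words occurring more than twice are kept.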
def get_rna_signal(s, d):
''' extract RNA-seq signal information, and return the words ordered by frequency '''
lst = s.split()
result = []
MAX_WORDS = 60
if lst[0] == '.': # all RNA samples
return 'all available signals'
for x in lst: # x is an RNA sample ID, words by frequency
if x in d:
desc = d[x]['description']
desc_lst = re.split(' ', desc)
short_lst = []
            for w in desc_lst: # w is one chunk of the description
                short_lst.extend(w.split())
                if len(short_lst) > MAX_WORDS: # keep only the first MAX_WORDS words; usually informative enough, and longer descriptions cost more computation time
                    short_lst = short_lst[:MAX_WORDS]
                    break
# index = desc.find(' ')
# if index > 0:
# desc = desc[:index]
result.append((' '.join(short_lst)).strip())
return word_freq3(result)
def get_rna_signal2(s, d): # not very successful, and slow, so NOT used
''' extract RNA-seq signal information, and return the words ordered by frequency '''
lst = s.split()
if lst[0] == '.': # all RNA samples
return 'all available signals'
text = ''
for x in lst: # x is an RNA sample ID, words by frequency
if x in d:
desc = d[x]['description']
text += desc.strip().rstrip('.') + '. '
rake = Rake(RAKE_STOPLIST_FILE)
keywords = rake.run(text)
return ' '.join( [ t[0] + ' (' + str(int(t[1])) + ')' for t in keywords ] )
def replace_old_html_page(fname, edge_date):
''' If the file fname needs updating, return True. '''
if not os.path.exists(fname): # if the file does not exist, it needs updating
return True
# Check all files AT2G43790_AT1G03080_0.html, AT2G43790_AT1G03080_1.html, AT2G43790_AT1G03080_2.html, etc. If any of them is too old, create a new one.
index = fname.rfind('_')
if index < 0:
print('html_network.py: %s has no underscore.' % (fname))
sys.exit()
fname_part = fname[:index]
    for fn in glob.glob(fname_part + '_*.html'): # match the sibling files AT2G43790_AT1G03080_*.html
file_date = datetime.fromtimestamp(os.path.getmtime(fn)).strftime('%Y%m%d')
if int(edge_date) - int(file_date) > 1: # edge_date is at least 1 day newer than edge file date
return True
return False
def format_date(s):
''' s in the form of 20170419. Return 2017-04-19 '''
s = s.strip()
if len(s) != 8:
return s
return s[0:4] + '-' + s[4:6] + '-' + s[6:]
def make_html_page_for_condition(fname, tf_name, target_name, condRstr, condCstr, edge_date, subset): # important page ***
### if the page already exists, and its information is up-to-date, then don't create it again (to save time)
if REGENERATE_ALL_EDGE_FILES == 'NO' and not replace_old_html_page(fname, edge_date):
return
    # Script/CSS includes for the plots; the relative path to the C3/D3 helper files is an assumption and may need adjusting.
    d3_library = '<link href="../depend/c3/c3.min.css" rel="stylesheet" /><script src="../depend/c3/d3.min.js"></script><script src="../depend/c3/c3.min.js"></script><script src="../depend/c3/scatterplot.js"></script><script src="../depend/c3/barchart.js"></script>'
    f = open(fname, 'w')
    f.write('<html><head><meta charset="utf-8">%s</head><body>' % (d3_library))
### RNA-seq
    f.write('<h2>RNA-seq experiments</h2>')
part = os.path.splitext( os.path.basename(fname) )[0] # get file name without extension
id_lst = part.split('_')
gene1_file = os.path.join('json', id_lst[0] + '.json') # TF
gene2_file = os.path.join('json', id_lst[1] + '.json') # target
    f.write('<p>TF is %s %s. Target is %s %s. Edge made on %s. Method: %s.</p>' % (id_lst[0], '' if tf_name == id_lst[0] else tf_name, id_lst[1], '' if target_name == id_lst[1] else target_name, format_date(edge_date), subset))
    cond_lst_str = str(condRstr.split()) # inserted into the javascript function call below
rnaseq_info_file = os.path.basename(RNA_SEQ_INFO_DATABASE_JSON)
    # Assumption: scatterplot.js defines drawScatterplot(tf_json, target_json, info_json, cond_list).
    s = '<div id="scatterplot"></div><script>drawScatterplot("%s", "%s", "%s", %s);</script>' % (gene1_file, gene2_file, rnaseq_info_file, cond_lst_str)
f.write(s)
global glb_rna_seq_info_dict
#s = get_rna_signal(condRstr, glb_rna_seq_info_dict) # DISABLED since this is SLOWEST part
    #if s.startswith('all available'):
    #    f.write('<p>%s</p>' % (s))
    ### ChIP-seq
    f.write('<h2>ChIP-seq experiments</h2>')
gene1_file = os.path.join('json2', id_lst[0] + '.json') # TF
gene2_file = os.path.join('json2', id_lst[1] + '.json' ) # target
cond_lst_str = str(condCstr.split())
    # Assumption: barchart.js defines drawBarchart(target_json, cond_list) for the binding-strength plot.
    s = '<a href="javascript:drawBarchart(\'%s\', %s)">Click for plot</a>' % (gene2_file, cond_lst_str) # display binding strength
f.write(s)
global glb_chip_seq_info_dict
s = get_chip_signal(condCstr, glb_chip_seq_info_dict)
    if s != '':
        f.write('<h3>Signal</h3><p>Note: words are ordered by frequency.</p>' + '<p>' + s + '</p>')
    else:
        f.write('<h3>Signal</h3>' + '<p>None.</p>')
s = get_chip_phenotype(condCstr, glb_chip_seq_info_dict)
    f.write('<h3>Phenotype</h3>' + '<p>' + s + '</p>')
    f.write('<p>%s</p>' % (make_link_string_for_cond(condCstr, 'chipseq')))
    f.write('</body></html>')
f.close()
def make_w2ui_table_page(fname, gene_str, download_str, dict_lst_regulates, dict_lst_regulatedby):
''' each element in dict_lst_* must have the form {'strength': '', 'metric': '', 'geneid': '', 'genename': ''} '''
    start_part = '''
    <html>
    <head>
    <link rel="stylesheet" type="text/css" href="w2ui.min.css" />
    <script type="text/javascript" src="jquery.min.for.w2ui.js"></script>
    <script type="text/javascript" src="w2ui.min.js"></script>
    </head>
    <body>
    <h2>%s</h2>
    %s
    <div id="grid1" style="width:100%%; height:400px;"></div>
    <div id="grid2" style="width:100%%; height:400px;"></div>
    ''' % (gene_str, download_str)
    # Rebuild the two w2ui grids; this assumes the standard w2ui 1.x grid API
    # (each record needs a unique recid).
    columns = ("[{ field: 'strength', caption: 'Strength', size: '15%' },"
               " { field: 'metric', caption: 'Metric', size: '15%' },"
               " { field: 'geneid', caption: 'Gene ID', size: '30%' },"
               " { field: 'genename', caption: 'Gene name', size: '40%' }]")
    records1 = json.dumps([dict(x, recid=i + 1) for i, x in enumerate(dict_lst_regulates)])
    records2 = json.dumps([dict(x, recid=i + 1) for i, x in enumerate(dict_lst_regulatedby)])
    grid1 = "<script>$('#grid1').w2grid({ name: 'grid1', header: 'regulatee table', show: { header: true }, columns: %s, records: %s });</script>" % (columns, records1)
    grid2 = "<script>$('#grid2').w2grid({ name: 'grid2', header: 'regulator table', show: { header: true }, columns: %s, records: %s });</script>" % (columns, records2)
    end_part = '</body></html>'
result = start_part + grid1 + grid2 + end_part
# minify html
lst = re.split(r'\s{2,}', result)
result = ''.join(lst)
f = open(fname, 'w')
f.write(result)
f.close()
def make_html_page(node, G, fname, agi2name_dict):
''' Make html pages for node's successors and predecessors. '''
#f.write('