configurator.py
#!/usr/bin/env python3
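"""
Configuration generator for the Variant Calling Pipeline: merges a
pipeline-level and a project-level YAML config, prepares the project output
folders, links the raw input files, and writes the inputs.json and
per-sample .tsv files consumed by the WDL workflow.
"""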
import argparse
import csv
import json
import os
import sys
from collections.abc import Mapping

import yaml


def update(d, u):
"""
Recursively updates the entries in a given dictionary
:param d: The dictionary to be updated
:param u: The values which will be added to the input dictionary
:return: Updated dictionary
"""
for k, v in u.items():
if isinstance(v, Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
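

# A quick illustration of update()'s deep-merge semantics (example values
# are made up):
#   update({'a': {'x': 1, 'y': 2}}, {'a': {'y': 3}})
#   -> {'a': {'x': 1, 'y': 3}}
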
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="JSON config generator for the Variant Calling Pipeline")
    parser.add_argument('--project-config', '-c',
                        dest='project_config',
                        help='Project specific .yaml config file',
                        type=str,
                        required=True)
    parser.add_argument('--pipeline-config', '-p',
                        dest='pipeline_config',
                        help='Pipeline specific .yaml config file',
                        type=str,
                        required=True)
args = parser.parse_args()
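    # Example invocation (file names are hypothetical):
    #   ./configurator.py -p pipeline.yaml -c my_project.yaml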
# Read config files
# default will contain pipeline configurations
default = {}
# specific will contain the project configurations
specific = {}
    with open(args.pipeline_config, 'r') as stream:
        try:
            default = yaml.safe_load(stream)
        except yaml.YAMLError as exception:
            sys.stderr.write(str(exception) + '\n')
            sys.exit(1)
    with open(args.project_config, 'r') as stream:
        try:
            specific = yaml.safe_load(stream)
        except yaml.YAMLError as exception:
            sys.stderr.write(str(exception) + '\n')
            sys.exit(1)
update(default, specific)
# Rename the updated dictionary
config = default
# Create the output folders
    project_path = config['project_path']
    output_path = os.path.join(project_path, config['genome_version'])
    raw_data_path = os.path.join(output_path, 'raw')
    json_path = os.path.join(project_path, 'config_files')
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(raw_data_path, exist_ok=True)
    os.makedirs(json_path, exist_ok=True)
# Parse necessary configurations
project_genome = config['genome_version']
inputs_dict = {'variant_calling.project_name': config['project_name'],
'variant_calling.project_path': config['project_path'],
'variant_calling.genome_version': project_genome
}
    # Collect direct configurations for WDL tasks (passed through verbatim)
    inputs_dict.update(config.get('bypass_parser', {}))
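    # For illustration, a pipeline .yaml could expose task inputs directly
    # (keys and values here are hypothetical):
    #   bypass_parser:
    #     variant_calling.reference_fasta: /path/to/genome.fa
    #     variant_calling.scatter_count: 20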
# Parse sample specific configurations
sas_file = config['sample_annotation']
sas_dict = {}
germline_samples = []
tumor_samples = []
    with open(sas_file, 'r') as sas:
        reader = csv.DictReader(sas, dialect='excel')
        for row in reader:
            if 'sample_name' not in row:
                continue
            sas_dict.setdefault(row['sample_name'], []).append(row)
            if row['sample_type'] == 'germline':
                germline_samples.append(row['sample_name'])
            elif row['sample_type'] == 'tumor':
                tumor_samples.append(row['sample_name'])
# WDL workflow will iterate over these samples
inputs_dict['variant_calling.sample_list'] = list(sas_dict.keys())
    if germline_samples:
        inputs_dict['variant_calling.germline_samples'] = germline_samples
    if tumor_samples:
        inputs_dict['variant_calling.tumor_samples'] = tumor_samples
    # Read sample details
    for sample, row_list in sas_dict.items():
        first_row = row_list[0]
        sample_dict = {'sample_name': sample,
                       'sample_type': first_row['sample_type'],
                       'library': first_row['library'],
                       'raw_bams': ''}
        if 'UMI' in first_row:
            sample_dict['UMI'] = first_row['UMI']
        if 'target_intervals' in first_row:
            sample_dict['target_intervals'] = first_row['target_intervals']
        if 'matched_normal' in first_row:
            # Tumor samples reference their matched normal (or 'NULL' when
            # none is annotated); germline samples reference themselves
            if first_row['sample_type'] == 'tumor' and first_row['matched_normal'] != '':
                sample_dict['matched_normal'] = first_row['matched_normal']
            elif first_row['sample_type'] == 'tumor' and first_row['matched_normal'] == '':
                sample_dict['matched_normal'] = 'NULL'
            elif first_row['sample_type'] == 'germline':
                sample_dict['matched_normal'] = sample
        bam_sources = []
        raw_size_mb = 0
# Collect the list of raw data input files for each sample
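        # Each entry in config['data_sources'] is a str.format template that
        # is filled with the columns of an annotation row, e.g. a hypothetical
        # template '/data/{flowcell}/{sample_name}.bam'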
        for row in row_list:
            if row.get('data_source'):
                source_template = config['data_sources'][row['data_source']]
                source = source_template.format(**row)
                if os.path.exists(source):
                    bam_sources.append(source)
                    source_stats = os.stat(source)
                    raw_size_mb += int(source_stats.st_size / (1024 * 1024))
                    # Symlink the raw file into the project's raw data folder
                    source_basename = os.path.basename(source)
                    relative_path = os.path.relpath(source, raw_data_path)
                    link_path = os.path.join(raw_data_path, source_basename)
                    if not os.path.islink(link_path):
                        os.symlink(relative_path, link_path)
                else:
                    print('WARNING: Could not locate {} for sample {}'.format(source, sample))
        # Skip the sample in case the input files weren't located
        if not bam_sources:
            print('WARNING: Could not locate any raw data files for sample {}, skipping.'.format(sample))
            inputs_dict['variant_calling.sample_list'].remove(sample)
        else:
            print('Sample {} has {} data sources, total size is {}MB'.format(sample, len(bam_sources), raw_size_mb))
            sample_dict['raw_bams'] = ' '.join(bam_sources)
            sample_dict['raw_size_mb'] = raw_size_mb
        # Write the per-sample key/value pairs as a .tsv for downstream tasks
        sample_tsv = os.path.join(json_path, '{}.tsv'.format(sample))
        with open(sample_tsv, 'w') as output:
            for key, value in sample_dict.items():
                output.write('{}\t{}\n'.format(key, value))
# Dump the inputs.json file
project_json = os.path.join(json_path, '{}.inputs.json'.format(config['project_name']))
with open(project_json, 'w') as output:
json.dump(inputs_dict, output, indent=2)
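    # The resulting inputs.json contains entries along these lines (values
    # are illustrative only):
    #   {
    #     "variant_calling.project_name": "example_project",
    #     "variant_calling.genome_version": "hg38",
    #     "variant_calling.sample_list": ["sample_A", "sample_B"]
    #   }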
print("\nProject output folder:\n{}".format(project_path))