# v8
# renamed none-align to noalign; deleted none-annotation
# changed 'expn' to 'expr'
# added some list .txt files for the rmarkdown part
# recalculate the log2

import os
import sys
import pandas as pd
import subprocess
import shutil
import re
import numpy as np
import argparse
import datetime


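# Pipeline overview (a reading aid summarizing the functions defined below):
#   1. directory_structure() copies each *.fastq (gunzipping *.gz first) into its own work folder
#   2. alignments() runs bwa aln / bwa samse to produce per-sample .sam files
#   3. annotation() drives the TCGA miRNA profiling perl scripts (annotate.pl, alignment_stats.pl,
#      graph_libs.pl, tcga.pl, expression_matrix.pl)
#   4. rename(), table_merge(), filter_miRNA_table(), table_merge_miRNA() and merge_mirna_taglen()
#      reshape the pipeline outputs into merged summary tables
#   5. remove_files() converts each .sam into a sorted, indexed .bam and deletes intermediates

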
def directory_structure(ori_samplefolder, outputpath):
    # Change the working directory to the original sample folder (the path holding all *.fastq files)
    os.chdir(ori_samplefolder)
    # capture each fastq file and build one work folder per sample
    global path
    path = []
    for file in os.listdir(ori_samplefolder):
        if file.endswith(".gz"):
            # decompress first, then treat the result like a plain .fastq file
            subprocess.call(["gunzip " + file], shell=True)
            file = os.path.splitext(file)[0]
        if file.endswith(".fastq"):
            samplename = os.path.splitext(file)[0]
            sampleext = os.path.splitext(file)[1]
            samplename = samplename.replace('_', '')
            new_samplefolder = os.path.join(outputpath, samplename)
            if os.path.exists(new_samplefolder):
                path.append(new_samplefolder)
                print("Work folder: \n" + new_samplefolder + "\n already exists..")
            else:
                os.makedirs(new_samplefolder)
                shutil.copy(file, new_samplefolder + '/' + samplename + sampleext)
                path.append(new_samplefolder)
                print("Work folder: \n" + new_samplefolder + '\n will be created and the fastq file will be copied into it..')
    print("# The work folder will be: \n" + outputpath)


def parameters(spe, outputpath):
    species = None
    refname = None
    refpath = None
    if spe == 'hsa':
        species = ' -o hsa'
        refname = 'hg38'
        refpath = '/n/data1/genomes/indexes/hg38/hg38.fa'
    elif spe == 'mmu':
        species = ' -o mmu'
        refname = 'mm9'
        refpath = '/n/data1/genomes/indexes/mm9/mm9.fa'
    elif spe == 'dme':
        species = ' -o dme'
        refname = 'dm6'
        refpath = '/n/data1/genomes/indexes/dm6/dm6.fa'
    else:
        print("invalid species code, only hsa, mmu and dme are supported")

    # record the reference path so downstream steps can read it
    if os.path.isfile(outputpath + '/' + 'ref.txt'):
        os.remove(outputpath + '/' + 'ref.txt')
    f = open(outputpath + '/' + 'ref.txt', 'w+')
    f.write(refpath + '\n')
    f.close()
    return (species, refname, refpath)


def parameters_custom(spe, ref):
    species = None
    refname = None
    refpath = None
    if spe == 'hsa':
        species = ' -o hsa'
        refname = os.path.splitext(ref)[0]
        refpath = ref
    elif spe == 'mmu':
        species = ' -o mmu'
        refname = os.path.splitext(ref)[0]
        refpath = ref
    elif spe == 'dme':
        species = ' -o dme'
        refname = os.path.splitext(ref)[0]
        refpath = ref
    else:
        print("invalid species code, only hsa, mmu and dme are supported")
    return (species, refname, refpath)


def samplelist(ori_samplefolder, outputpath):
    # write the sample folder path plus one fastq file name per line (used by the rmarkdown part)
    if os.path.isfile(outputpath + '/' + 'samplelist.txt'):
        os.remove(outputpath + '/' + 'samplelist.txt')
    f = open(outputpath + '/' + 'samplelist.txt', 'w+')
    f.write(ori_samplefolder + '\n')
    for file in os.listdir(ori_samplefolder):
        if file.endswith(".fastq"):
            samplename = os.path.splitext(file)[0]
            f.write(samplename + '.fastq\n')
    f.close()


def alignments(referencebase_bwa):
    for i in range(len(path)):
        os.chdir(path[i])
        for file in os.listdir(os.getcwd()):
            if file.endswith(".fastq"):
                samplename, extension = os.path.splitext(file)
                print("# The alignment will be run as:\n/n/apps/CentOS7/bin/bwa aln -t 3 "
                      + referencebase_bwa + " " + samplename + ".fastq > " + samplename + ".sai")
                subprocess.call(["/n/apps/CentOS7/bin/bwa aln -t 3 " + referencebase_bwa + " "
                                 + samplename + ".fastq > " + samplename + ".sai"], shell=True)
                print("# Building the .sam file:\n/n/apps/CentOS7/bin/bwa samse -n 10 "
                      + referencebase_bwa + " " + samplename + ".sai " + samplename + ".fastq > "
                      + samplename + ".sam\n")
                subprocess.call(["/n/apps/CentOS7/bin/bwa samse -n 10 " + referencebase_bwa + " "
                                 + samplename + ".sai " + samplename + ".fastq > " + samplename + ".sam"],
                                shell=True)


def annotation(spec, referencebase, outputpath):
    # paths of the TCGA miRNA profiling pipeline tools
    runtools_annotation = 'perl /n/ngs/tools/tcga/v0.2.7/code/annotation/annotate.pl'
    runtools_annotation_alignment_stats = 'perl /n/ngs/tools/tcga/v0.2.7/code/library_stats/alignment_stats.pl'
    runtools_annotation_graph = 'perl /n/ngs/tools/tcga/v0.2.7/code/library_stats/graph_libs.pl'
    runtools_annotation_tcga = 'perl /n/ngs/tools/tcga/v0.2.7/code/custom_output/tcga/tcga.pl'
    runtools_expr = 'perl /n/ngs/tools/tcga/v0.2.7/code/library_stats/expression_matrix.pl'
    mirnabase = ' -m mirna_21a'

    fullcommand_annotation = runtools_annotation + mirnabase + " -u " + referencebase + spec + " -p " + outputpath
    fullcommand_align_stats = runtools_annotation_alignment_stats + " -p " + outputpath
    fullcommand_graph = runtools_annotation_graph + " -p " + outputpath
    fullcommand_tcga = runtools_annotation_tcga + mirnabase + spec + " -g " + referencebase + " -p " + outputpath
    fullcommand_expr = runtools_expr + mirnabase + spec + " -p " + outputpath

    print("# The annotation script will run as: ")
    print(fullcommand_annotation + '\n')
    subprocess.call([fullcommand_annotation], shell=True)

    print("# The statistics of annotation script will run as:")
    print(fullcommand_align_stats + '\n')
    subprocess.call([fullcommand_align_stats], shell=True)

    # the graph command is composed and printed here but not executed
    print("# The visualization of stats script will run as:")
    print(fullcommand_graph + '\n')

    print("# The tcga results output script will run as:")
    print(fullcommand_tcga + '\n')
    subprocess.call([fullcommand_tcga], shell=True)

    print("# The expression matrix script will run as: ")
    print(fullcommand_expr + '\n')
    subprocess.call([fullcommand_expr], shell=True)


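# For illustration only (not executed): with the default hsa/hg38 settings and an output
# directory of /path/to/out_dir (a placeholder), annotation() composes commands of this shape:
#   perl /n/ngs/tools/tcga/v0.2.7/code/annotation/annotate.pl -m mirna_21a -u hg38 -o hsa -p /path/to/out_dir
#   perl /n/ngs/tools/tcga/v0.2.7/code/library_stats/alignment_stats.pl -p /path/to/out_dir
#   perl /n/ngs/tools/tcga/v0.2.7/code/custom_output/tcga/tcga.pl -m mirna_21a -o hsa -g hg38 -p /path/to/out_dir
#   perl /n/ngs/tools/tcga/v0.2.7/code/library_stats/expression_matrix.pl -m mirna_21a -o hsa -p /path/to/out_dir

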
def rename(outputpath):
    df = pd.read_csv(outputpath + '/' + 'alignment_stats.csv')

    df.columns = [u'Library', u'Index', u'Total Reads greater 15bp', u'Adapter dimers',
                  u'Adapter dimers', u'Adapter at 1-14bp', u'Adapter at 15-25bp',
                  u'Adapter at 26-35bp', u'Adapter after 35bp',
                  u'Aligned Reads Post-Filter', u'percent Aligned Reads', u'Unaligned Reads',
                  u'percent Unaligned Reads', u'Filtered Reads without XA',
                  u'Softclipped Reads', u'Chastity Failed Reads Post-Filter',
                  u'detected miRNA', u'detected miRNA Covered by 10 Reads',
                  u'Total miRNA reads', u'Crossmapped miRNA reads', u'mature miRNA reads', u'star miRNA reads',
                  u'precursor miRNA reads', u'miRNA loop reads', u'unannotated miRNA reads', u'snoRNA reads',
                  u'tRNA reads', u'rRNA reads', u'snRNA reads', u'scRNA reads', u'srpRNA reads',
                  u'Other RepeatMasker RNAs reads', u'RNA (No CDS) reads', u'3UTR reads', u'5UTR reads',
                  u'Coding Exon reads', u'Intron reads', u'LINE reads', u'SINE reads', u'LTR reads', u'Satellite reads',
                  u'RepeatMasker DNA reads', u'RepeatMasker Low complexity reads',
                  u'RepeatMasker Simple repeat reads', u'RepeatMasker Other reads',
                  u'RepeatMasker Unknown reads', u'Unknown reads', u'Total miRNA',
                  u'Crossmapped miRNA', u'mature miRNA', u'star miRNA',
                  u'precursor miRNA', u'miRNA loop', u'unannotated miRNA',
                  u'snoRNA', u'tRNA', u'rRNA', u'snRNA', u'scRNA', u'srpRNA',
                  u'Other RepeatMasker RNAs', u'RNA(No CDS)', u'3UTR',
                  u'5UTR', u'Coding Exon', u'Intron', u'LINE', u'SINE',
                  u'LTR', u'Satellite', u'RepeatMasker DNA',
                  u'RepeatMasker Low complexity', u'RepeatMasker Simple repeat',
                  u'RepeatMasker Other', u'RepeatMasker Unknown', u'Unknown']
    df1 = df.iloc[:, 0:47]
    # strip the '%' sign from the percentage columns and convert them to fractions
    df2 = (df.iloc[:, 47:76]).replace('%', '', regex=True).astype('float') / 100
    df3 = pd.concat([df1, df2], axis=1)
    df3.to_csv(outputpath + '/' + "alignment_stats.csv", index=False, header=True)

    if os.path.isfile(outputpath + '/expn_matrix.txt'):
        os.rename(outputpath + '/expn_matrix.txt', outputpath + '/miRNA_expr_raw_matrix.txt')

    if os.path.isfile(outputpath + '/expn_matrix_norm.txt'):
        rpm = pd.read_table(outputpath + '/expn_matrix_norm.txt', index_col=0, header=0)
        rpm = rpm.round(6)
        rpm1 = rpm + 1
        rpm = rpm.astype(str)
        rpm = rpm.replace('0.000000', '0')

        # recalculate the log2 matrix as log2(RPM + 1)
        log = np.log2(rpm1)
        log = pd.DataFrame(log)
        log = log.round(6)
        log = log.astype(str)
        log = log.replace('0.000000', '0')
        log.to_csv(outputpath + '/' + 'miRNA_expr_RPM_plus_1_log2_matrix.txt', sep='\t')

        rpm.to_csv(outputpath + '/' + 'miRNA_expr_RPM_matrix.txt', sep='\t')
        os.remove(outputpath + '/expn_matrix_norm.txt')

    if os.path.isfile(outputpath + '/expn_matrix_norm_log.txt'):
        os.remove(outputpath + '/expn_matrix_norm_log.txt')


def table_merge(outputpath):

    result_table_list_1 = ['3_UTR.txt', '5_UTR.txt', 'Intron.txt', 'LINE.txt', 'LTR.txt', 'rmsk_DNA.txt',
                           'rmsk_RNA.txt', 'rmsk_Simple_repeat.txt', 'rmsk_Unknown.txt', 'rRNA.txt', 'scRNA.txt',
                           'SINE.txt', 'snoRNA.txt', 'snRNA.txt', 'srpRNA.txt', 'tRNA.txt', 'Satellite.txt']

    merged_result = None

    # create an empty txt table file if that table does not exist under <sample>_features
    for i in range(len(path)):
        sample_name = os.path.split(path[i])[1]
        workpath = path[i] + '/' + sample_name + '_features'
        os.chdir(workpath)
        for filename in result_table_list_1:
            if os.path.exists(workpath + '/' + filename):
                continue
            else:
                f = open(filename, 'w+')
                f.close()

    for filename in result_table_list_1:
        result = None
        for i in range(len(path)):
            sample_name = os.path.split(path[i])[1]
            file_content = pd.read_csv(path[i] + '/' + sample_name + '_features' + '/' + filename,
                                       names=[sample_name], index_col=0)
            if result is None:
                result = file_content
            else:
                result = pd.concat([result, file_content], axis=1)
        result['type'] = filename.split('.')[0]
        if merged_result is None:
            merged_result = result
        else:
            merged_result = pd.concat([merged_result, result], axis=0)

    # set the missing values
    merged_result = merged_result.fillna('NA')

    # move the type column to the front
    type_col = merged_result.pop('type')
    merged_result.insert(0, 'type', type_col)

    merged_result.to_csv(outputpath + '/' + "Feature_summary_table.txt", sep='\t')
    return merged_result


def filter_miRNA_table():
    # rewrite each sample's miRNA.txt so that every line has the same fields:
    # name, category, mir_id, value (unannotated/precursor/stemloop lines lack a mir_id)
    for i in range(len(path)):
        sample_name = os.path.split(path[i])[1]
        workpath = path[i] + '/' + sample_name + '_features'
        os.chdir(workpath)

        name = []
        category = []
        mir_id = []
        value = []
        content = []

        file = open(workpath + "/" + "miRNA.txt")
        reader = file.readlines()
        file.close()
        for line in reader:
            fields = re.split(r'[,\s]', line)
            if fields[1] in ("unannotated", "precursor", "stemloop"):
                name.append(fields[0])
                category.append(fields[1])
                mir_id.append(fields[1])
                value.append(fields[2])
            else:
                name.append(fields[0])
                category.append(fields[1])
                mir_id.append(fields[2])
                value.append(fields[3])
        for j in range(len(value)):
            content.append(name[j] + " " + category[j] + " " + mir_id[j] + "," + value[j])

        output = open(workpath + "/" + "miRNA_filtered.txt", "w")
        for j in range(len(content)):
            output.write(content[j])
            output.write('\n')
        output.close()


# Earlier, commented-out draft of table_merge_miRNA (the active version is below):
# def table_merge_miRNA():
#
#     result_table_list_1 = ['miRNA_filtered.txt']
#
#     merged_result = None
#     print(path)
#     # create the empty txt table file if that table does not exist under sample_features
#     for i in range(len(path)):
#         sample_name = os.path.split(path[i])[1]
#         print(sample_name)
#         workpath = path[i] + '/' + sample_name + '_features'
#         os.chdir(workpath)
#         for filename in result_table_list_1:
#             if os.path.exists(workpath + '/' + filename):
#                 continue
#             else:
#                 f = open(filename, 'w+')
#                 f.close()
#
#     for filename in result_table_list_1:
#         result = None
#         for i in range(len(path)):
#             sample_name = os.path.split(path[i])[1]
#             file_content = pd.read_csv(path[i] + '/' + sample_name + '_features' + '/' + filename, names=[sample_name], index_col=0)
#             print(file_content)
#
#             if result is None:
#                 result = file_content
#             else:
#                 result = pd.concat([result, file_content], axis=1)
#         result['type'] = filename.split('.')[0]
#         if merged_result is None:
#             merged_result = result
#         else:
#             merged_result = pd.concat([merged_result, result], axis=0)
#
#     # set the missing value
#     merged_result = merged_result.fillna('NA')
#
#     # switch the type column to the front
#     type = merged_result.pop('type')
#     merged_result.insert(0, 'type', type)
#     return merged_result


def table_merge_miRNA(outputpath):

    result_table_list_1 = ['miRNA_filtered.txt']

    result = None

    # create an empty txt table file if that table does not exist under <sample>_features
    for i in range(len(path)):
        sample_name = os.path.split(path[i])[1]
        workpath = path[i] + '/' + sample_name + '_features'
        os.chdir(workpath)
        for filename in result_table_list_1:
            if os.path.exists(workpath + '/' + filename):
                continue
            else:
                f = open(filename, 'w+')
                f.close()

    table_head = ['name', 'type', 'mir_id']

    for filename in result_table_list_1:
        result = None
        for i in range(len(path)):
            sample_name = os.path.split(path[i])[1]
            table_head.append(sample_name)

            file_content = pd.read_csv(path[i] + '/' + sample_name + '_features' + '/' + filename,
                                       header=None, index_col=0)

            if result is None:
                result = file_content
            else:
                result = pd.concat([result, file_content], axis=1)

    # set the missing values
    # cannot fill with 'NA' here, since it leads to misplacement in the next step -- to FIX
    result = result.fillna(0)
    result.to_csv(outputpath + '/' + "miRNA_expr_summary_table.txt", sep='\t', header=False)
    # re-read the dump so the 'name type mir_id' index splits into three separate columns
    merged_result = pd.read_csv(outputpath + '/' + "miRNA_expr_summary_table.txt", sep=r'\s+', names=table_head)
    merged_result.to_csv(outputpath + '/' + "miRNA_expr_summary_table.txt", sep='\t', index=False)
    return result


def merge_mirna_taglen(outputpath):
    result = None

    for i in range(len(path)):
        sample_name = os.path.split(path[i])[1]
        workpath = path[i] + '/' + sample_name + '_features'
        os.chdir(workpath)
        file_content = pd.read_csv('filtered_taglengths.csv', header=0)
        file_content.insert(0, 'sample', sample_name)
        file_content.to_csv('filtered_taglengths.txt', sep='\t', index=False, header=True)

        if result is None:
            result = file_content
        else:
            result = pd.concat([result, file_content], axis=0)

    # add a per-row total across all annotation columns (everything except 'sample' and 'taglen')
    df = pd.DataFrame(result)
    col_list = list(df)
    col_list.remove('sample')
    col_list.remove('taglen')
    df['total'] = df[col_list].sum(axis=1)
    df.to_csv(outputpath + '/' + "merged_taglen_table.csv", index=False, header=True)


def remove_files():
    print("# The redundant files will be transformed and removed as : ")
    for i in range(len(path)):
        os.chdir(path[i])
        for file in os.listdir(os.getcwd()):
            if file.endswith(".fastq"):
                os.remove(file)
            if file.endswith(".sam"):
                samplename = os.path.splitext(file)[0]
                print("\n samtools view -Su " + file + " > " + samplename + ".bam ")
                subprocess.call(["samtools view -Su " + file + " > " + samplename + ".bam "], shell=True)

                print("\n samtools sort " + samplename + ".bam -o " + samplename + ".sorted.bam ")
                subprocess.call(["samtools sort " + samplename + ".bam -o " + samplename + ".sorted.bam "], shell=True)

                print("\n samtools index " + samplename + ".sorted.bam")
                subprocess.call(["samtools index " + samplename + ".sorted.bam"], shell=True)
                os.remove(file)
                os.remove(samplename + '.bam')
                os.remove(samplename + '.sai')


def main():
    parser = argparse.ArgumentParser()
    requiredNamed = parser.add_argument_group('required named arguments')
    requiredNamed.add_argument("-i", "--input", action="store", dest='fastq_dir', required=True,
                               help="Please enter the path of the folder holding the sample .fastq files")
    requiredNamed.add_argument("-o", "--output", action="store", dest='out_dir', required=True,
                               help="Please enter the path of the directory in which to save the results")
    requiredNamed.add_argument("-s", "--species", action="store", dest='specie', required=True,
                               help="Please enter the species of your samples (hsa, mmu or dme)")

    parser.add_argument("--noalign", action="store_true", default=False, dest='alignment',
                        help="pass this flag if aligned .sam files already exist under your sample work folders and the alignment step should be skipped")
    parser.add_argument("-r", "--reference", action="store", default=None, dest='ref',
                        help="custom reference to use instead of the default, e.g. /n/data1/genomes/indexes/hg38/hg38.fa")
    args = parser.parse_args()

    # redirect stdout to a log file; the log is moved to the output directory at the end
    f = open(args.fastq_dir + '/' + "logging.txt", 'w+')
    logging_path = args.fastq_dir + '/' + "logging.txt"
    sys.stdout = f
    print('# The used arguments are:')
    a = 'python '
    for i in range(len(sys.argv)):
        a = a + ' ' + sys.argv[i]

    b = a.replace("'", '')
    b = b.replace("[", '')
    b = b.replace(",", '')
    b = b.replace("]", '')

    print(b)

    directory_structure(args.fastq_dir, args.out_dir)

    # parameter settings
    if args.ref is None:
        tcga_arg = parameters(args.specie, args.out_dir)
    else:
        tcga_arg = parameters_custom(args.specie, args.ref)
    print('# The reference genome index in use is:')
    print(tcga_arg[2] + '\n')

    samplelist(args.fastq_dir, args.out_dir)

    if not args.alignment:
        alignments(tcga_arg[2])

    annotation(tcga_arg[0], tcga_arg[1], args.out_dir)

    rename(args.out_dir)

    table_merge(args.out_dir)

    filter_miRNA_table()

    table_merge_miRNA(args.out_dir)

    merge_mirna_taglen(args.out_dir)

    shutil.copy("/n/core/Bioinformatics/research/cxu/mirna_pipeline/python_pipeline/rmarkdown_mod.Rmd", args.out_dir)
    remove_files()

    today = datetime.date.today()
    print('\n')
    print('Date:')
    print(today)

    f.close()
    # restore stdout before moving the log file
    sys.stdout = sys.__stdout__
    if os.path.isfile(args.out_dir + '/logging.txt'):
        os.remove(args.out_dir + '/logging.txt')
    shutil.move(logging_path, args.out_dir)


if __name__ == '__main__':
    main()
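

# Example invocations (a sketch; the script name 'mirna_pipeline.py' and all paths are
# illustrative placeholders, not recorded in the original paste):
#   python mirna_pipeline.py -i /path/to/fastq_dir -o /path/to/out_dir -s hsa
#   python mirna_pipeline.py -i /path/to/fastq_dir -o /path/to/out_dir -s mmu --noalign
#   python mirna_pipeline.py -i /path/to/fastq_dir -o /path/to/out_dir -s dme -r /path/to/custom_ref.fa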