#!/usr/bin/env python3
"""Export jobs from the OnDemand Job Composer SQLite database to CSV."""
import csv
import json
import os.path
import sqlite3


def flatten_results(row):
    # Merge the JSON blob stored in job_cache into the row itself, so the
    # cached fields (pbsid, host, script) become top-level keys.
    result = dict(**row)
    result.update(json.loads(row['job_cache']))
    return result


def main():
    path = os.path.expanduser('~/ondemand/data/sys/myjobs/production.sqlite3')
    db = sqlite3.connect(path)
    db.row_factory = sqlite3.Row  # allow column access by name
    conn = db.cursor()

    # newline='' keeps the csv module from inserting blank lines on Windows.
    with open('job_composer_export.csv', 'w', newline='') as output_file:
        # pbsid, host, and script come from the job_cache JSON; the rest
        # come straight from the query below.
        headers = ('pbsid', 'cluster', 'created', 'host', 'name', 'status', 'script')
        query = '''
            SELECT
                j.created_at AS created, w.name AS name,
                j.job_cache AS job_cache, w.batch_host AS cluster,
                j.status AS status
            FROM jobs AS j
            LEFT JOIN workflows AS w ON j.workflow_id = w.id;
        '''
        writer = csv.DictWriter(output_file, headers)
        writer.writeheader()
        writer.writerows([
            {k: v for k, v in row.items() if k in headers}
            for row in map(flatten_results, conn.execute(query).fetchall())
        ])

    db.close()


if __name__ == '__main__':
    main()
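
For context, flatten_results is what lets pbsid, host, and script appear in the CSV even though the SELECT never mentions them: they are pulled out of the job_cache JSON blob. A minimal sketch of that step with a made-up row follows; the job_cache keys and values shown are assumptions about what OnDemand caches, not values confirmed from the schema.

# Hypothetical example row; the job_cache contents are assumed, not
# taken from a real production.sqlite3.
row = {
    'created': '2019-10-18 12:00:00',
    'name': 'example job',
    'status': 'completed',
    'cluster': 'owens',
    'job_cache': '{"pbsid": "1234.batch", "host": "owens", "script": "main.sh"}',
}
print(flatten_results(row))
# The result holds the original keys plus pbsid, host, and script promoted
# to the top level, ready for the header filter in main().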