#!/usr/bin/env python
"""Export OnDemand Job Composer jobs from its SQLite database to CSV."""
import csv
import json
import os.path
import sqlite3


def flatten_results(row):
    # Merge the plain SQL columns with the extra per-job fields that the
    # Job Composer stores as a JSON blob in the job_cache column.
    result = dict(**row)
    result.update(json.loads(row['job_cache']))
    return result


def main():
    path = os.path.expanduser('~/ondemand/data/sys/myjobs/production.sqlite3')
    db = sqlite3.connect(path)
    db.row_factory = sqlite3.Row  # lets rows be accessed by column name
    cursor = db.cursor()

    query = '''
        SELECT
            j.created_at AS created, w.name AS name,
            j.job_cache AS job_cache, w.batch_host AS cluster,
            j.status AS status
        FROM jobs AS j
        LEFT JOIN workflows AS w ON j.workflow_id = w.id;
    '''
    # pbsid, host and script are not SQL columns; they come out of the
    # job_cache JSON once flatten_results() has merged it in.
    headers = ('pbsid', 'cluster', 'created', 'host', 'name', 'status', 'script')

    with open('job_composer_export.csv', 'w', newline='') as output_file:
        writer = csv.DictWriter(output_file, headers)
        writer.writeheader()
        writer.writerows(
            {k: v for k, v in row.items() if k in headers}
            for row in map(flatten_results, cursor.execute(query).fetchall())
        )

    db.close()


if __name__ == '__main__':
    main()
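
For reference, the flattening step can be exercised on a hand-built row (for example in a REPL, with the definitions above in scope). The sample values and the exact shape of the job_cache payload below are assumptions for illustration; only the key names pbsid, host and script are implied by the headers tuple:

# Hypothetical input, shaped like one row returned by the query above.
# The job_cache string is an assumed example of the cached payload format.
sample = {
    'created': '2020-06-01 12:00:00',
    'name': 'example workflow',
    'cluster': 'owens',
    'status': 'completed',
    'job_cache': '{"pbsid": "12345", "host": "owens", "script": "main.sh"}',
}

print(flatten_results(sample))
# {'created': '2020-06-01 12:00:00', 'name': 'example workflow',
#  'cluster': 'owens', 'status': 'completed', 'pbsid': '12345',
#  'host': 'owens', 'script': 'main.sh'}

The keys added from the JSON are exactly the ones the CSV export filters for, which is why the headers tuple lists columns that never appear in the SQL query itself.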