# TransactionID  MerchantStore  MerchantCity  TransactionDate
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from joblib import Parallel, delayed
import multiprocessing

glueContext = GlueContext(SparkContext.getOrCreate())
# This list emulates the behavior of partition schemes with only one table in Redshift.
partition_keys = ['txn_type', 'amount', 'trans_date', 'acceptor_ref', 'location_schema',
                  'settlement_date', 'merchant_city', 'merchant_state', 'merchant_country',
                  'mcc', 'industry_code', 'tran_code', 'reason_code', 'plan_id', 'pin_txn',
                  'eci', 'prescore_amount', 'batch_date', 'src_file_name', 'load_time']
# Read the source table from Redshift into a DynamicFrame.
txn_table_df = glueContext.create_dynamic_frame_from_options(
    connection_type='redshift',
    connection_options={
        "url": "jdbc:redshift://testredshiftcluster.**.us-east-1.redshift.amazonaws.com:5439/dev",
        "user": "**",
        "password": "**",
        "dbtable": "loyalty.dailyclienttxn",
        "redshiftTmpDir": "s3://loyalty-poc-arm/tempDirectory/"
    }
)
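
# Hedged aside, not in the original paste: the credentials above are hard-coded
# (masked as **). One common alternative is to pass them as Glue job arguments
# and read them with getResolvedOptions (already imported); the argument names
# REDSHIFT_USER and REDSHIFT_PASSWORD below are hypothetical.
# args = getResolvedOptions(sys.argv, ['REDSHIFT_USER', 'REDSHIFT_PASSWORD'])
# connection_options = {
#     "url": "jdbc:redshift://testredshiftcluster.**.us-east-1.redshift.amazonaws.com:5439/dev",
#     "user": args['REDSHIFT_USER'],
#     "password": args['REDSHIFT_PASSWORD'],
#     "dbtable": "loyalty.dailyclienttxn",
#     "redshiftTmpDir": "s3://loyalty-poc-arm/tempDirectory/"
# }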
def read_and_write(partition_key):
    # Write the full frame to S3, partitioned by a single key per call,
    # with one output directory tree per partition key.
    path = "s3://loyalty-poc-arm/allpartitionsWithouParallelRun4/" + partition_key
    glueContext.write_dynamic_frame_from_options(
        frame=txn_table_df,
        connection_type="s3",
        connection_options={"path": path, "partitionKeys": [partition_key]},
        format="parquet")
# Use joblib to run the loop in parallel so that each partition key is written concurrently.
results = Parallel(n_jobs=-1, prefer="threads")(
    delayed(read_and_write)(partition_key) for partition_key in partition_keys)
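
# Equivalent stdlib sketch (an assumption, not the author's code): each call
# only issues a driver-side write, so a plain thread pool behaves much like
# joblib's threaded backend here.
# from concurrent.futures import ThreadPoolExecutor
# with ThreadPoolExecutor(max_workers=len(partition_keys)) as pool:
#     results = list(pool.map(read_and_write, partition_keys))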

# Glue job configuration:
# Worker type: G.2X
# Number of workers: 149
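#
# A hedged sketch of creating a job with these settings via boto3; the job
# name, role ARN, and script location are hypothetical placeholders.
# import boto3
# glue = boto3.client('glue')
# glue.create_job(
#     Name='loyalty-partition-export',
#     Role='arn:aws:iam::123456789012:role/GlueJobRole',
#     Command={'Name': 'glueetl', 'ScriptLocation': 's3://loyalty-poc-arm/scripts/job.py'},
#     WorkerType='G.2X',
#     NumberOfWorkers=149)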