Not a member of Pastebin yet? Sign Up — it unlocks many cool features!
#!/bin/sh
# Import the `products` table from the retail_db MySQL database into HDFS
# as a single pipe-delimited text file under the user's ep02 exercise dir.
#
# NOTE(review): --password puts the credential on the command line (visible
# in `ps` and shell history); prefer interactive -P or --password-file.
sqoop-import --connect jdbc:mysql://quickstart.cloudera:3306/retail_db \
  --username retail_dba \
  --password cloudera \
  --table products \
  --fields-terminated-by "|" \
  --lines-terminated-by "\n" \
  --target-dir /user/cloudera/teja_arun/ep02/products \
  --as-textfile \
  -m 1
# Compute per-category price stats (max/min/avg/count for products under $100)
# over the Sqoop-imported pipe-delimited files, writing Snappy-compressed Avro.
#
# Fix 1: `Row` and `HiveContext` are not exported by the top-level `pyspark`
# package; they must be imported from `pyspark.sql`, otherwise the original
# import line raises ImportError.
from pyspark import SparkConf, SparkContext
from pyspark.sql import HiveContext, Row

conf = SparkConf().setAppName("ep02").setMaster("yarn-client")
sc = SparkContext(conf=conf)
sqlContext = HiveContext(sc)


def _parse_product(rec):
    """Parse one pipe-delimited product line into a Row.

    Fix 2: split the record once instead of six times per field.
    Field layout (from the Sqoop import of retail_db.products):
    id | category_id | name | description | price | image
    """
    fields = rec.split("|")
    return Row(
        product_id=int(fields[0]),
        product_category_id=int(fields[1]),
        product_name=fields[2],
        product_desc=fields[3],
        product_price=float(fields[4]),
        product_image=fields[5],
    )


productsRDD = sc.textFile("/user/cloudera/teja_arun/ep02/products")
productsDF = productsRDD.map(_parse_product).toDF()
productsDF.registerTempTable("products")

# Keep shuffle parallelism small for this tiny dataset.
sqlContext.setConf("spark.sql.shuffle.partitions", "4")
sqlResult = sqlContext.sql("select product_category_id,max(product_price) max_price,min(product_price) min_price, avg(product_price) avg_price, count(product_id) total_products from products where product_price < 100 group by product_category_id order by product_category_id")

# Fix 3: spark-avro's codec option takes a short name ("snappy"/"deflate"),
# not a Hadoop codec class; the original class-name value is invalid.
sqlContext.setConf("spark.sql.avro.compression.codec", "snappy")
sqlResult.write.save("/user/cloudera/teja_arun/ep02/solutions/products_price", "com.databricks.spark.avro")
Add Comment
Please, Sign In to add comment