# Spark 1.3-era API: load a PostgreSQL table through the generic JDBC source.
from pyspark.sql import SQLContext

sqlContext = SQLContext(sc)  # sc is the SparkContext provided by the pyspark shell
df = sqlContext.load(source="jdbc", url="jdbc:postgresql://host/dbname", dbtable="schema.tablename")
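sqlContext.load was deprecated in Spark 1.4; a minimal sketch of the equivalent read through the DataFrameReader API, assuming the PostgreSQL JDBC driver is already on the classpath (host, database, and table names are placeholders carried over from above):

# Spark 1.4+ equivalent of the legacy load() call above.
df = (sqlContext.read
      .format("jdbc")
      .option("url", "jdbc:postgresql://host/dbname")
      .option("dbtable", "schema.tablename")
      .load())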
# Point SPARK_CLASSPATH at the PostgreSQL JDBC driver before the SparkContext is created
# (the backslashes in the Windows path require a raw string).
import os
os.environ['SPARK_CLASSPATH'] = r"C:\Users\ACERNEW3\Desktop\Spark\spark-1.3.0-bin-hadoop2.4\postgresql-9.2-1002.jdbc3.jar"
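SPARK_CLASSPATH has been deprecated since Spark 1.0. A sketch of the commonly used alternative of passing the jar through PYSPARK_SUBMIT_ARGS instead, which must also run before the SparkContext is created (the jar path here is an assumption):

import os
# Hypothetical driver location; the trailing 'pyspark-shell' token is required.
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /path/to/postgresql-9.2-1002.jdbc3.jar pyspark-shell'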
# Or pass the driver jar when launching the shell (replace the placeholders):
pyspark --conf spark.executor.extraClassPath=<jdbc.jar> --driver-class-path <jdbc.jar> --jars <jdbc.jar> --master <master-URL>
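The same invocation filled in with hypothetical values for a local standalone cluster (the jar path and master URL are assumptions, not values from the original):

pyspark --conf spark.executor.extraClassPath=/opt/jars/postgresql-42.1.4.jar \
        --driver-class-path /opt/jars/postgresql-42.1.4.jar \
        --jars /opt/jars/postgresql-42.1.4.jar \
        --master spark://127.0.0.1:7077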
# Full standalone example: put the driver jar on the classpath, build the context, and read a table.
import os

from pyspark import SparkContext, SparkConf
from pyspark.sql import DataFrameReader, SQLContext

sparkClassPath = os.getenv('SPARK_CLASSPATH', '/path/to/connector/postgresql-42.1.4.jar')

# Populate configuration
conf = SparkConf()
conf.setAppName('application')
conf.set('spark.jars', 'file:%s' % sparkClassPath)
conf.set('spark.executor.extraClassPath', sparkClassPath)
conf.set('spark.driver.extraClassPath', sparkClassPath)
# Uncomment the line below and adjust the address to run against a cluster on a different IP
#conf.set('spark.master', 'spark://127.0.0.1:7077')

sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)

url = 'postgresql://127.0.0.1:5432/postgresql'
properties = {'user': 'username', 'password': 'password'}
df = DataFrameReader(sqlContext).jdbc(url='jdbc:%s' % url, table='tablename', properties=properties)

df.printSchema()
df.show()
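The same connection details work in the other direction. A minimal sketch of writing a DataFrame back to PostgreSQL with DataFrame.write.jdbc, reusing the url and properties defined above (the target table name is an assumption):

# Write the DataFrame to a hypothetical target table, appending to any existing rows.
df.write.jdbc(url='jdbc:%s' % url, table='tablename_copy', mode='append', properties=properties)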