Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
# Compute per-date average pickup counts from taxi data recorded in
# 30-minute intervals, then list the dates with the highest averages.
from pyspark.sql import SparkSession

APP_NAME = "DataFrames"
SPARK_URL = "local[*]"  # NOTE(review): defined but never passed to the builder — confirm intent

spark = SparkSession.builder.appName(APP_NAME) \
    .config('spark.ui.showConsoleProgress', 'false') \
    .getOrCreate()

taxi = spark.read.load('/datasets/pickups_terminal_5.csv',
                       format='csv', header='true', inferSchema='true')
# Treat missing values as zero pickups so the aggregates below are defined.
taxi = taxi.fillna(0)
# createOrReplaceTempView replaces registerTempTable, deprecated since Spark 2.0.
taxi.createOrReplaceTempView("taxi")

# Average number of orders per day over the 30-minute periods.
# show() prints the frame itself and returns None, so it must not be
# wrapped in print() — that only added a spurious "None" line.
taxi.groupBy("date").mean().select("date", "avg(pickups)").show()

# Days with the highest average number of orders in the table.
taxi.groupBy("date").mean().select("date", "avg(pickups)") \
    .sort("avg(pickups)", ascending=False).show()
Advertisement
Add Comment
Please sign in to add a comment
Advertisement