Not a member of Pastebin yet? Sign up — it unlocks many cool features!
# Merge the new in-memory DataFrame `df` into the on-disk parquet "megafile",
# keeping the existing rows that fall outside the new data's date range.
# NOTE(review): this fragment references `self`, `df`, `dd` (dask.dataframe)
# and `pd` (pandas) from its enclosing scope — assumed available; confirm.
mdf = dd.read_parquet(self.local_location + self.megafile, engine='pyarrow')

# Date range covered by the new data (assumes a datetime-like index — TODO confirm).
inx = df.index.unique()
start1 = '2016-01-01'                                           # sentinel lower bound
end1 = pd.to_datetime(inx.values.min()).strftime('%Y-%m-%d')    # day of earliest new row
start2 = pd.to_datetime(inx.values.max()).strftime('%Y-%m-%d')  # day of latest new row
end2 = '2029-01-01'                                             # sentinel upper bound

mdf1 = mdf[start1:end1]  # existing rows up to the new data's first day
mdf2 = mdf[start2:end2]  # existing rows from the new data's last day onward

# Partition sizing: aim for ~100 MB per partition (bytes // 100_000_001 + 1, >= 1).
_PART_BYTES = 100000001

# Fixes vs. the original paste:
#  * dd.from_pandas() was called with neither npartitions nor chunksize ->
#    "ValueError: Exactly one of npartitions and chunksize must be specified."
#  * df_usage2 measured mdf1 instead of mdf2 (copy/paste slip).
#  * On the empty-mdf1 path, `df` was appended twice.
#  * DataFrame.append() accepts no `npartitions` kwarg; use dd.from_pandas /
#    dd.concat and repartition explicitly instead.
df_usage1 = 1 + int(df.memory_usage(deep=True).sum()) // _PART_BYTES

# Stitch the pieces together in chronological order: old-before, new, old-after.
parts = []
if len(mdf1) > 0:
    parts.append(mdf1)
parts.append(dd.from_pandas(df, npartitions=df_usage1))
if len(mdf2) > 0:
    # .compute() forces evaluation of the lazy memory-usage reduction.
    df_usage2 = 1 + int(mdf2.memory_usage(deep=True).sum().compute()) // _PART_BYTES
    parts.append(mdf2.repartition(npartitions=df_usage2))
mdf1 = dd.concat(parts)  # final merged frame, kept in `mdf1` for downstream code
Error raised by the code above: ValueError: Exactly one of npartitions and chunksize must be specified. (This comes from the `dd.from_pandas(df)` call, which was made with neither `npartitions` nor `chunksize`.)
Add Comment
Please sign in to add a comment.