Tuesday, 30 April 2019

PySpark DataFrame orderBy: partition level or overall?

When I do an orderBy on a PySpark DataFrame, does it sort the data across all partitions (i.e. the entire result), or does it only sort within each partition? If the latter, can anyone suggest how to do an orderBy across the whole dataset? I have an orderBy right at the end of my code.
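For what it's worth, here is a small diagnostic sketch (the toy DataFrame and column names are invented for illustration, not part of my pipeline) that one could run to check the behaviour empirically: tag each output row with its partition id and compare the value ranges per partition; non-overlapping ranges would point to a global sort rather than a per-partition one.

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

# Hypothetical toy DataFrame used only to inspect where sorted rows land.
spark = SparkSession.builder.getOrCreate()
df = spark.range(0, 1000).withColumn("value", (F.rand(seed=42) * 1000).cast("int"))

ordered = df.orderBy(F.desc("value"))

# Tag each output row with the partition it was computed in, then compare the
# min/max value per partition. Non-overlapping ranges across partitions would
# indicate a total (global) ordering rather than a per-partition sort.
(ordered
 .withColumn("pid", F.spark_partition_id())
 .groupBy("pid")
 .agg(F.min("value").alias("min_value"), F.max("value").alias("max_value"))
 .orderBy("pid")
 .show())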

My current code:

from pyspark.sql import functions as F
from pyspark.sql.functions import desc
from pyspark.sql.window import Window


def extract_work(self, days_to_extract):

        source_folders = self.work_folder_provider.get_work_folders(s3_source_folder=self.work_source,
                                                                    warehouse_ids=self.warehouse_ids,
                                                                    days_to_extract=days_to_extract)
        source_df = self._load_from_s3(source_folders)

        # Partition and de-dupe the DataFrame, retaining the latest record per (binScannableId, warehouseId)
        source_df = self.data_frame_manager.partition_and_dedupe_data_frame(source_df,
                                                                            partition_columns=['binScannableId', 'warehouseId'],
                                                                            sort_key='cameraCaptureTimestampUtc',
                                                                            desc=True)
        # Filter out anything that does not qualify for virtual count.
        source_df = self._virtual_count_filter(source_df)

        history_folders = self.work_folder_provider.get_history_folders(s3_history_folder=self.history_source,
                                                                        days_to_extract=days_to_extract)
        history_df = self._load_from_s3(history_folders)

        # Filter out historical items
        if history_df:
            source_df = source_df.join(history_df, 'binScannableId', 'leftanti')
        else:
            self.logger.error("No History was found")

        # Sort by defectProbability
        source_df = source_df.orderBy(desc('defectProbability'))

        return source_df

def partition_and_dedupe_data_frame(data_frame, partition_columns, sort_key, desc):
    if desc:
        window = Window.partitionBy(partition_columns).orderBy(F.desc(sort_key))
    else:
        window = Window.partitionBy(partition_columns).orderBy(F.asc(sort_key))

    # Keep only the top-ranked row(s) per partition key; note that rank() keeps ties.
    data_frame = data_frame.withColumn('rank', F.rank().over(window)).filter(F.col('rank') == 1).drop('rank')
    return data_frame
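One design note on the de-dupe above: F.rank() assigns the same rank to ties on the sort key, so more than one row per (binScannableId, warehouseId) pair can survive when timestamps collide. If exactly one row per key is intended, a row_number-based variant is a common alternative; the sketch below is an assumption about that intent, not part of the original code.

from pyspark.sql import functions as F
from pyspark.sql.window import Window

# Hypothetical variant that keeps exactly one row per partition key even when
# sort_key values tie, by using row_number() instead of rank().
def partition_and_dedupe_keep_one(data_frame, partition_columns, sort_key, descending=True):
    order = F.desc(sort_key) if descending else F.asc(sort_key)
    window = Window.partitionBy(partition_columns).orderBy(order)
    return (data_frame
            .withColumn('row_num', F.row_number().over(window))
            .filter(F.col('row_num') == 1)
            .drop('row_num'))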

def _virtual_count_filter(self, source_df):
        df = self._create_data_frame()
        for key in self.virtual_count_thresholds.keys():
            temp_df = source_df.filter((source_df['expectedQuantity'] == key) & (source_df['defectProbability'] > self.virtual_count_thresholds[key]))
            df = df.union(temp_df)
        return df
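
As a design-choice aside, the per-threshold union in _virtual_count_filter could also be expressed as a single filter built from one combined predicate; a minimal sketch, assuming virtual_count_thresholds maps expectedQuantity to a minimum defectProbability as used above.

from functools import reduce
from pyspark.sql import functions as F

# Hypothetical alternative to the union-per-threshold loop: build one OR-combined
# predicate and filter once. Assumes virtual_count_thresholds is a dict of
# expectedQuantity -> minimum defectProbability.
def virtual_count_filter(source_df, virtual_count_thresholds):
    conditions = [
        (F.col('expectedQuantity') == qty) & (F.col('defectProbability') > threshold)
        for qty, threshold in virtual_count_thresholds.items()
    ]
    return source_df.filter(reduce(lambda acc, cond: acc | cond, conditions))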


