diff --git a/pufferlib/pufferl.py b/pufferlib/pufferl.py
index 7132a19f9..ad315e4fc 100644
--- a/pufferlib/pufferl.py
+++ b/pufferlib/pufferl.py
@@ -802,9 +802,9 @@ def stop(self):
         self.stopped = True
 
 def downsample(data_list, num_points):
-    if not data_list or num_points <= 0:
+    if not data_list or num_points < 0:
         return []
-    if num_points == 1:
+    if num_points <= 1:
         return [data_list[-1]]
     if len(data_list) <= num_points:
         return data_list
diff --git a/pufferlib/sweep.py b/pufferlib/sweep.py
index 73c510a62..0627f91d0 100644
--- a/pufferlib/sweep.py
+++ b/pufferlib/sweep.py
@@ -559,8 +559,8 @@ def __init__(self,
         self.gp_max_obs = gp_max_obs # train time bumps after 800?
         self.infer_batch_size = infer_batch_size
 
-        # Probably useful only when downsample=1 and each run is expensive.
-        self.use_success_prob = sweep_config['downsample'] == 1
+        # Probably useful only when downsample is 0 or 1 in the config and each run is expensive.
+        self.use_success_prob = sweep_config['downsample'] in (0, 1)
         self.success_classifier = LogisticRegression(class_weight='balanced')
 
         # This model is conservative. Aggressive early stopping interferes with and hampers GP model learning.