From f0be174848b2988b180627e7a6b07d159760ae07 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Sun, 4 Feb 2024 02:03:10 +0000
Subject: [PATCH 1/2] restore 'by_epoch' for SchedulerList

---
 ppsci/optimizer/lr_scheduler.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ppsci/optimizer/lr_scheduler.py b/ppsci/optimizer/lr_scheduler.py
index 1f1dd75767..91a686554e 100644
--- a/ppsci/optimizer/lr_scheduler.py
+++ b/ppsci/optimizer/lr_scheduler.py
@@ -750,6 +750,7 @@ class SchedulerList:
     def __init__(self, scheduler_list: Tuple[lr.LRScheduler, ...]):
         super().__init__()
         self._sch_list = scheduler_list
+        self.by_epoch = False

     def step(self):
         for sch in self._sch_list:

From 9780f6b5627797463b87f2ff154fb25230222b69 Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Sun, 4 Feb 2024 02:18:25 +0000
Subject: [PATCH 2/2] fix for epnn

---
 examples/epnn/functions.py | 19 ++++++++++++++-----
 ppsci/data/__init__.py     |  5 ++++-
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/examples/epnn/functions.py b/examples/epnn/functions.py
index 90400e5aff..00543d38f3 100644
--- a/examples/epnn/functions.py
+++ b/examples/epnn/functions.py
@@ -226,6 +226,11 @@ def __init__(self, data_state, data_stress, itrain):
         self.data_stress = data_stress
         self.itrain = itrain

+    def _cvt_to_ndarray(self, list_dict):
+        for key in list_dict:
+            list_dict[key] = np.asarray(list_dict[key])
+        return list_dict
+
     def get(self, epochs=1):
         # Slow if using BatchSampler to obtain data
         input_dict_train = {
@@ -243,7 +248,7 @@ def get(self, epochs=1):
         label_dict_train = {"dummy_loss": []}
         label_dict_val = {"dummy_loss": []}
         for i in range(epochs):
-            shuffled_indices = paddle.randperm(n=self.data_state.x_train.shape[0])
+            shuffled_indices = np.random.permutation(self.data_state.x_train.shape[0])
             input_dict_train["state_x"].append(
                 self.data_state.x_train[shuffled_indices[0 : self.itrain]]
             )
@@ -256,9 +261,9 @@ def get(self, epochs=1):
             input_dict_train["stress_y"].append(
                 self.data_stress.y_train[shuffled_indices[0 : self.itrain]]
             )
-            label_dict_train["dummy_loss"].append(paddle.to_tensor(0.0))
+            label_dict_train["dummy_loss"].append(0.0)

-            shuffled_indices = paddle.randperm(n=self.data_state.x_valid.shape[0])
+            shuffled_indices = np.random.permutation(self.data_state.x_valid.shape[0])
             input_dict_val["state_x"].append(
                 self.data_state.x_valid[shuffled_indices[0 : self.itrain]]
             )
@@ -271,7 +276,11 @@ def get(self, epochs=1):
             input_dict_val["stress_y"].append(
                 self.data_stress.y_valid[shuffled_indices[0 : self.itrain]]
             )
-            label_dict_val["dummy_loss"].append(paddle.to_tensor(0.0))
+            label_dict_val["dummy_loss"].append(0.0)
+        input_dict_train = self._cvt_to_ndarray(input_dict_train)
+        label_dict_train = self._cvt_to_ndarray(label_dict_train)
+        input_dict_val = self._cvt_to_ndarray(input_dict_val)
+        label_dict_val = self._cvt_to_ndarray(label_dict_val)
         return input_dict_train, label_dict_train, input_dict_val, label_dict_val


@@ -287,7 +296,7 @@ def __init__(self, dataset_path, train_p=0.6, cross_valid_p=0.2, test_p=0.2):
     def get_shuffled_data(self):
         # Need to set the seed, otherwise the loss will not match the precision
         ppsci.utils.misc.set_random_seed(seed=10)
-        shuffled_indices = paddle.randperm(n=self.x.shape[0])
+        shuffled_indices = np.random.permutation(self.x.shape[0])
         n_train = math.floor(self.train_p * self.x.shape[0])
         n_cross_valid = math.floor(self.cross_valid_p * self.x.shape[0])
         n_test = math.floor(self.test_p * self.x.shape[0])
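
Note on the examples/epnn/functions.py change above: paddle.randperm and paddle.to_tensor are replaced with NumPy equivalents, and the new _cvt_to_ndarray helper stacks the per-epoch lists, so get() now returns dictionaries of plain ndarrays rather than tensors. A minimal sketch of the same pattern (the array shapes, the itrain value, and the epoch count are illustrative, not taken from the example):

    import numpy as np

    # Illustrative stand-ins for data_state.x_train / data_state.y_train.
    x_train = np.random.rand(100, 4)
    y_train = np.random.rand(100, 1)
    itrain = 32

    input_dict = {"state_x": [], "state_y": []}
    label_dict = {"dummy_loss": []}
    for _ in range(3):  # three pre-shuffled "epochs"
        # np.random.permutation replaces paddle.randperm, so indexing keeps
        # everything as host-side ndarrays.
        idx = np.random.permutation(x_train.shape[0])
        input_dict["state_x"].append(x_train[idx[:itrain]])
        input_dict["state_y"].append(y_train[idx[:itrain]])
        # A plain float replaces paddle.to_tensor(0.0) as the placeholder label.
        label_dict["dummy_loss"].append(0.0)

    # Same role as _cvt_to_ndarray: collapse each list into one ndarray.
    for d in (input_dict, label_dict):
        for key in d:
            d[key] = np.asarray(d[key])

    print(input_dict["state_x"].shape)  # (3, 32, 4)
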
diff --git a/ppsci/data/__init__.py b/ppsci/data/__init__.py
index d8f34e4ef4..0405ecfaaa 100644
--- a/ppsci/data/__init__.py
+++ b/ppsci/data/__init__.py
@@ -89,7 +89,10 @@ def build_dataloader(_dataset, cfg):
             logger.warning(
                 "`batch_size` is set to 1 as neither sampler config nor batch_size is set."
             )
-        batch_sampler = None
+        batch_sampler = io.BatchSampler(
+            _dataset,
+            batch_size=cfg["batch_size"],
+        )

     # build collate_fn if specified
     batch_transforms_cfg = cfg.pop("batch_transforms", None)
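
Note on the ppsci/data/__init__.py change above: when the config provides neither a sampler nor a batch_size, build_dataloader now constructs a plain io.BatchSampler instead of passing batch_sampler=None, so the downstream DataLoader always receives a batch sampler. A rough sketch of that fallback path, with a hypothetical build_batch_sampler wrapper standing in for the surrounding build_dataloader logic (io is assumed to be paddle.io):

    from paddle import io

    def build_batch_sampler(_dataset, cfg):
        # Hypothetical wrapper around the fallback branch of build_dataloader.
        if "sampler" not in cfg:
            # Upstream code defaults batch_size to 1 and logs a warning when
            # neither a sampler config nor a batch_size is given.
            cfg.setdefault("batch_size", 1)
            # The patch builds a BatchSampler here instead of returning None.
            return io.BatchSampler(_dataset, batch_size=cfg["batch_size"])
        ...  # sampler-config branch, unchanged by this patch
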