diff --git a/src/ensemble/ensemble.py b/src/ensemble/ensemble.py index 6909afb..402232c 100644 --- a/src/ensemble/ensemble.py +++ b/src/ensemble/ensemble.py @@ -476,20 +476,27 @@ def calc_ml_prediction(self, enX=None): if self.aux_input is not None: level_enX[n]['aux_input'] = self.aux_input[n] - # Index list of ensemble members list_member_index = list(ml_ne) - # Run prediction in parallel using p_map - en_pred = p_map( - self.sim.run_fwd_sim, - level_enX, - list_member_index, - num_cpus=no_tot_run, - disable=self.disable_tqdm, - **progbar_settings, - ) + ######################################################################################################## + # Number of parallel runs + if self.sim.input_dict.get('hpc', False): # Run prediction in parallel on hpc + en_pred = self.run_on_HPC(level_enX, batch_size=nparallel) + + # Parallelization on local machine using p_map + else: + en_pred = p_map( + self.sim.run_fwd_sim, + level_enX, + list_member_index, + num_cpus=no_tot_run, + disable=self.disable_tqdm, + **progbar_settings, + ) + ######################################################################################################## + # List successful runs and crashes list_crash = [indx for indx, el in enumerate(en_pred) if el is False] list_success = [indx for indx, el in enumerate(en_pred) if el is not False] @@ -531,10 +538,8 @@ def calc_ml_prediction(self, enX=None): en_pred[list_crash[index]] = deepcopy(en_pred[element]) - # Convert ensemble specific result into pred_data, and filter for NONE data - ml_pred_data.append([{typ: np.concatenate(tuple((el[ind][typ][:, np.newaxis]) for el in en_pred), axis=1) - if any(elem is not None for elem in tuple((el[ind][typ]) for el in en_pred)) - else None for typ in en_pred[0][0].keys()} for ind in range(len(en_pred[0]))]) + # Convert ensemble specific result into pred_data, and filter for NONE data + ml_pred_data.append(dtools.en_pred_to_pred_data(en_pred)) # loop over time instance first, and the level instance. 
self.pred_data = np.array(ml_pred_data).T.tolist() diff --git a/src/popt/loop/ensemble_base.py b/src/popt/loop/ensemble_base.py index 1e3ab39..ed3ca9d 100644 --- a/src/popt/loop/ensemble_base.py +++ b/src/popt/loop/ensemble_base.py @@ -112,7 +112,7 @@ def function(self, x, *args, **kwargs): self._aux_input() # check for ensmble - if len(x.shape) == 1: + if len(x.shape) == 1: x = x[:,np.newaxis] self.ne = self.num_models else: self.ne = x.shape[1] @@ -124,8 +124,8 @@ def function(self, x, *args, **kwargs): x = self._reorganize_multilevel_ensemble(x) x = self.scale_state(x).squeeze() - if self.enX is not None: - self.enX = self.scale_state(self.enX) + #if self.enX is not None: + # self.enX = self.scale_state(self.enX) # Evaluate the objective function if run_success: @@ -133,13 +133,13 @@ def function(self, x, *args, **kwargs): self.pred_data, input_dict=self.sim.input_dict, true_order=self.sim.true_order, - state=matrix_to_dict(self.enX, self.idX), + state=matrix_to_dict(x, self.idX), **kwargs ) else: func_values = np.inf # the simulations have crashed - if len(x.shape) == 1: + if len(x.shape) == 1: self.stateF = func_values else: self.enF = func_values @@ -258,13 +258,13 @@ def save_stateX(self, path='./', filetype='npz'): np.save(path + 'stateX.npy', stateX) def _reorganize_multilevel_ensemble(self, x): - if ('multilevel' in self.keys_en) and (len(x.shape) > 1): - ml_ne = self.keys_en['multilevel']['ml_ne'] - x = ot.toggle_ml_state(x, ml_ne) - return x - else: - return x - + # Only toggle multilevel state when x is truly an ensemble (2D with >1 columns). + # Treat shape (nx, 1) the same as a 1D vector. 
+ if 'multilevel' in self.keys_en: + if isinstance(x,list) or ( x.ndim > 1 and (x.shape[1] > 1) ): + ml_ne = self.multilevel['ml_ne'] + x = ot.toggle_ml_state(x, ml_ne) + return x def _aux_input(self): """ diff --git a/src/popt/loop/ensemble_gaussian.py b/src/popt/loop/ensemble_gaussian.py index 411fc6a..57595a8 100644 --- a/src/popt/loop/ensemble_gaussian.py +++ b/src/popt/loop/ensemble_gaussian.py @@ -87,7 +87,7 @@ def gradient(self, x, *args, **kwargs): enX = np.random.multivariate_normal(self.stateX, self.covX, self.ne).T # Shift ensemble to have correct mean - self.enX = enX - enX.mean(axis=1, keepdims=True) + self.stateX[:,None] + enX = enX - enX.mean(axis=1, keepdims=True) + self.stateX[:,None] # Truncate to bounds if (self.lb is not None) and (self.ub is not None): @@ -123,10 +123,13 @@ def gradient(self, x, *args, **kwargs): index += ne if 'multilevel' in self.keys_en: - weight = np.array(self.keys_en['multilevel']['ml_weights']) - if not np.sum(weight) == 1.0: - weight = weight / np.sum(weight) - grad = np.dot(grad_ml, weight) + weight = np.array(self.multilevel['ml_weights']) + if len(weight) > 1: + if not np.sum(weight) == 1.0: + weight = weight / np.sum(weight) + grad = np.dot(grad_ml, weight) + else: + grad = grad_ml[0] else: grad = grad_ml[0] diff --git a/src/popt/loop/optimize.py b/src/popt/loop/optimize.py index 88a7c0c..c1ff1e7 100644 --- a/src/popt/loop/optimize.py +++ b/src/popt/loop/optimize.py @@ -209,7 +209,7 @@ def run_loop(self): self.logger(f'─────> EPF-EnOpt: {self.epf_iteration}, {r} (outer iteration, penalty factor)') # print epf info else: self.logger(f'─────> EPF-EnOpt: converged, no variables changed more than {conv_crit*100} %') # print epf info - final_obj_no_penalty = str(round(float(self.fun(self.xk)),4)) + final_obj_no_penalty = str( round( float( np.mean(self.fun(self.xk)) ),4) ) self.logger(f'─────> EPF-EnOpt: objective value without penalty = {final_obj_no_penalty}') # print epf info def save(self): """