Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 19 additions & 14 deletions src/ensemble/ensemble.py
Original file line number Diff line number Diff line change
Expand Up @@ -476,20 +476,27 @@ def calc_ml_prediction(self, enX=None):
if self.aux_input is not None:
level_enX[n]['aux_input'] = self.aux_input[n]


# Index list of ensemble members
list_member_index = list(ml_ne)

# Run prediction in parallel using p_map
en_pred = p_map(
self.sim.run_fwd_sim,
level_enX,
list_member_index,
num_cpus=no_tot_run,
disable=self.disable_tqdm,
**progbar_settings,
)
########################################################################################################

# Number of parallel runs
if self.sim.input_dict.get('hpc', False): # Run prediction in parallel on hpc
en_pred = self.run_on_HPC(level_enX, batch_size=nparallel)

# Parallelization on local machine using p_map
else:
en_pred = p_map(
self.sim.run_fwd_sim,
level_enX,
list_member_index,
num_cpus=no_tot_run,
disable=self.disable_tqdm,
**progbar_settings,
)
########################################################################################################

# List successful runs and crashes
list_crash = [indx for indx, el in enumerate(en_pred) if el is False]
list_success = [indx for indx, el in enumerate(en_pred) if el is not False]
Expand Down Expand Up @@ -531,10 +538,8 @@ def calc_ml_prediction(self, enX=None):

en_pred[list_crash[index]] = deepcopy(en_pred[element])

# Convert ensemble specific result into pred_data, and filter for NONE data
ml_pred_data.append([{typ: np.concatenate(tuple((el[ind][typ][:, np.newaxis]) for el in en_pred), axis=1)
if any(elem is not None for elem in tuple((el[ind][typ]) for el in en_pred))
else None for typ in en_pred[0][0].keys()} for ind in range(len(en_pred[0]))])
# Convert ensemble specific result into pred_data, and filter for NONE data
ml_pred_data.append(dtools.en_pred_to_pred_data(en_pred))

# loop over time instance first, and the level instance.
self.pred_data = np.array(ml_pred_data).T.tolist()
Expand Down
24 changes: 12 additions & 12 deletions src/popt/loop/ensemble_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ def function(self, x, *args, **kwargs):
self._aux_input()

# check for ensemble
if len(x.shape) == 1:
if len(x.shape) == 1:
x = x[:,np.newaxis]
self.ne = self.num_models
else: self.ne = x.shape[1]
Expand All @@ -124,22 +124,22 @@ def function(self, x, *args, **kwargs):
x = self._reorganize_multilevel_ensemble(x)
x = self.scale_state(x).squeeze()

if self.enX is not None:
self.enX = self.scale_state(self.enX)
#if self.enX is not None:
# self.enX = self.scale_state(self.enX)

# Evaluate the objective function
if run_success:
func_values = self.obj_func(
self.pred_data,
input_dict=self.sim.input_dict,
true_order=self.sim.true_order,
state=matrix_to_dict(self.enX, self.idX),
state=matrix_to_dict(x, self.idX),
**kwargs
)
else:
func_values = np.inf # the simulations have crashed

if len(x.shape) == 1:
if len(x.shape) == 1:
self.stateF = func_values
else:
self.enF = func_values
Expand Down Expand Up @@ -258,13 +258,13 @@ def save_stateX(self, path='./', filetype='npz'):
np.save(path + 'stateX.npy', stateX)

def _reorganize_multilevel_ensemble(self, x):
if ('multilevel' in self.keys_en) and (len(x.shape) > 1):
ml_ne = self.keys_en['multilevel']['ml_ne']
x = ot.toggle_ml_state(x, ml_ne)
return x
else:
return x

# Only toggle multilevel state when x is truly an ensemble (2D with >1 columns).
# Treat shape (nx, 1) the same as a 1D vector.
if 'multilevel' in self.keys_en:
if isinstance(x,list) or ( x.ndim > 1 and (x.shape[1] > 1) ):
ml_ne = self.multilevel['ml_ne']
x = ot.toggle_ml_state(x, ml_ne)
return x

def _aux_input(self):
"""
Expand Down
13 changes: 8 additions & 5 deletions src/popt/loop/ensemble_gaussian.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def gradient(self, x, *args, **kwargs):
enX = np.random.multivariate_normal(self.stateX, self.covX, self.ne).T

# Shift ensemble to have correct mean
self.enX = enX - enX.mean(axis=1, keepdims=True) + self.stateX[:,None]
enX = enX - enX.mean(axis=1, keepdims=True) + self.stateX[:,None]

# Truncate to bounds
if (self.lb is not None) and (self.ub is not None):
Expand Down Expand Up @@ -123,10 +123,13 @@ def gradient(self, x, *args, **kwargs):
index += ne

if 'multilevel' in self.keys_en:
weight = np.array(self.keys_en['multilevel']['ml_weights'])
if not np.sum(weight) == 1.0:
weight = weight / np.sum(weight)
grad = np.dot(grad_ml, weight)
weight = np.array(self.multilevel['ml_weights'])
if len(weight) > 1:
if not np.sum(weight) == 1.0:
weight = weight / np.sum(weight)
grad = np.dot(grad_ml, weight)
else:
grad = grad_ml[0]
else:
grad = grad_ml[0]

Expand Down
2 changes: 1 addition & 1 deletion src/popt/loop/optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ def run_loop(self):
self.logger(f'─────> EPF-EnOpt: {self.epf_iteration}, {r} (outer iteration, penalty factor)') # print epf info
else:
self.logger(f'─────> EPF-EnOpt: converged, no variables changed more than {conv_crit*100} %') # print epf info
final_obj_no_penalty = str(round(float(self.fun(self.xk)),4))
final_obj_no_penalty = str( round( float( np.mean(self.fun(self.xk)) ),4) )
self.logger(f'─────> EPF-EnOpt: objective value without penalty = {final_obj_no_penalty}') # print epf info
def save(self):
"""
Expand Down
Loading