Skip to content

Commit

Permalink
SacessOptimizer: Fix passing parameters (#1201)
Browse files Browse the repository at this point in the history
* Fixes a bug during the cooperation step where best parameters wouldn't be shared correctly due to incorrect assignment to `SyncManager.Array`.
* Fixes a bug where `free_indices` are ignored, and thus a wrong parameter vector is stored in local solutions and sent to the manager.
  • Loading branch information
dweindl authored Nov 21, 2023
1 parent 2d95ca4 commit 13e80c8
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 7 deletions.
9 changes: 7 additions & 2 deletions pypesto/optimize/ess/ess.py
Original file line number Diff line number Diff line change
Expand Up @@ -528,10 +528,15 @@ def _do_local_search(
f"{optimizer_result.exitflag}: {optimizer_result.message}"
)
if np.isfinite(optimizer_result.fval):
self.local_solutions.append(optimizer_result.x)
local_solution_x = optimizer_result.x[
optimizer_result.free_indices
]
local_solution_fx = optimizer_result.fval

self.local_solutions.append(local_solution_x)

self._maybe_update_global_best(
optimizer_result.x, optimizer_result.fval
local_solution_x, local_solution_fx
)
break

Expand Down
22 changes: 17 additions & 5 deletions pypesto/optimize/ess/sacess.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,9 @@ def reconfigure_worker(self, worker_idx: int) -> Dict:
].copy()
for setting in ["local_n2", "balance", "dim_refset"]:
if setting in leader_options:
self._ess_options[worker_idx] = leader_options[setting]
self._ess_options[worker_idx][setting] = leader_options[
setting
]
return self._ess_options[worker_idx].copy()

def submit_solution(
Expand Down Expand Up @@ -356,8 +358,14 @@ def submit_solution(
f"Accepted solution from worker {sender_idx}: {fx}."
)
# accept
if len(x) != len(self._best_known_x):
raise AssertionError(
f"Received solution with {len(x)} parameters, "
f"but expected {len(self._best_known_x)}."
)
for i, xi in enumerate(x):
self._best_known_x[i] = xi
self._best_known_fx.value = fx
self._best_known_x.value = x
self._worker_comms[sender_idx] += 1
self._worker_scores[sender_idx] = (
self._worker_comms[sender_idx] * elapsed_time_s
Expand Down Expand Up @@ -543,10 +551,14 @@ def _cooperate(self):
f"(known best: {self._best_known_fx}).",
)
if recv_fx < self._best_known_fx or (
not np.isfinite(self._best_known_fx) and np.isfinite(recv_x)
not np.isfinite(self._best_known_fx) and np.isfinite(recv_fx)
):
if not np.isfinite(recv_x).all():
raise AssertionError(
f"Received non-finite parameters {recv_x}."
)
self._logger.debug(
f"Worker {self._worker_idx} received better solution."
f"Worker {self._worker_idx} received better solution {recv_fx}."
)
self._best_known_fx = recv_fx
self._n_received_solutions += 1
Expand Down Expand Up @@ -577,7 +589,7 @@ def maybe_update_best(self, x: np.array, fx: float):
self._logger.debug(
f"Worker {self._worker_idx} maybe sending solution {fx}. "
f"best known: {self._best_known_fx}, "
f"rel change: {(self._best_known_fx - fx) / fx:.4g}, "
f"rel change: {(fx - self._best_known_fx) / fx:.4g}, "
f"threshold: {self._acceptance_threshold}"
)

Expand Down

0 comments on commit 13e80c8

Please sign in to comment.