Skip to content

Commit

Permalink
Overhaul of examples
Browse files Browse the repository at this point in the history
This commit updates/fixes/improves/adds to the set of examples.

- Two tour notebooks are included with everything users need to
know about how to use the package.
- A clear yet realistic example of optimizing parameters of machine
learning models is included. It is a much nicer version of the old
sklearn_example script.
- A glorious example of how to utilize this package in a concurrent
fashion was added.
- The buggy and redundant xgboost example was removed.

PS: The advanced tour notebook is not quite done yet. However, I needed
to merge this branch so people could submit PRs to the updated release
branch.
  • Loading branch information
fmfn authored and fmfn committed Nov 25, 2018
1 parent b06c2d4 commit c6a754e
Show file tree
Hide file tree
Showing 13 changed files with 1,261 additions and 343 deletions.
10 changes: 8 additions & 2 deletions bayes_opt/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
from .bayesian_optimization import BayesianOptimization, Events
from .util import UtilityFunction
from .observer import ScreenLogger
from .observer import ScreenLogger, JSONLogger

__all__ = ["BayesianOptimization", "UtilityFunction", "Events", "ScreenLogger"]
# Public API of the bayes_opt package.
__all__ = [
"BayesianOptimization",
"UtilityFunction",
"Events",
"ScreenLogger",
"JSONLogger",
]
16 changes: 8 additions & 8 deletions bayes_opt/bayesian_optimization.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def dispatch(self, event):


class BayesianOptimization(Observable):
def __init__(self, f, pbounds, random_state=None, verbose=1):
def __init__(self, f, pbounds, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)

Expand Down Expand Up @@ -96,16 +96,18 @@ def max(self):
def res(self):
return self._space.res()

def register(self, x, target):
def register(self, params, target):
"""Expect observation with known target"""
self._space.register(x, target)
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)

def probe(self, x, lazy=True):
def probe(self, params, lazy=True):
"""Probe target of x"""
if lazy:
self._queue.add(x)
self._queue.add(params)
else:
self._space.probe(x)
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP)

def suggest(self, utility_function):
"""Most promissing point to probe next"""
Expand Down Expand Up @@ -166,9 +168,7 @@ def maximize(self,
iteration += 1

self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_STEP)

# Notify about finished optimization
self.dispatch(Events.OPTMIZATION_END)

def set_bounds(self, new_bounds):
Expand Down
33 changes: 19 additions & 14 deletions bayes_opt/observer.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ class ScreenLogger(_Tracker):
_default_cell_size = 9
_default_precision = 4

def __init__(self, verbose=0):
def __init__(self, verbose=2):
self._verbose = verbose
self._header_length = None
super(ScreenLogger, self).__init__()
Expand Down Expand Up @@ -101,11 +101,11 @@ def _step(self, instance, colour=Colours.black):
res = instance.res[-1]
cells = []

cells.append(self._format_number(self._iterations))
cells.append(self._format_number(self._iterations + 1))
cells.append(self._format_number(res["target"]))

for val in res["params"].values():
cells.append(self._format_number(val))
for key in instance.space.keys:
cells.append(self._format_number(res["params"][key]))

return "| " + " | ".join(map(colour, cells)) + " |"

Expand All @@ -120,21 +120,26 @@ def _header(self, instance):
self._header_length = len(line)
return line + "\n" + ("-" * self._header_length)

def _is_new_max(self, instance):
if self._previous_max is None:
self._previous_max = instance.max["target"]
return instance.max["target"] > self._previous_max

def update(self, event, instance):
if event == Events.OPTMIZATION_START:
line = self._header(instance)
line = self._header(instance) + "\n"
elif event == Events.OPTMIZATION_STEP:
colour = (
Colours.purple if
self._previous_max is None or
instance.max["target"] > self._previous_max else
Colours.black
)
line = self._step(instance, colour=colour)
is_new_max = self._is_new_max(instance)
if self._verbose == 1 and not is_new_max:
line = ""
else:
colour = Colours.purple if is_new_max else Colours.black
line = self._step(instance, colour=colour) + "\n"
elif event == Events.OPTMIZATION_END:
line = "=" * self._header_length
line = "=" * self._header_length + "\n"

print(line)
if self._verbose:
print(line, end="")
self._update_tracker(event, instance)

class JSONLogger(_Tracker):
Expand Down
2 changes: 1 addition & 1 deletion bayes_opt/target_space.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ def probe(self, x):
x = self._as_array(x)

try:
y = self._cache[_hashable(x)]
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
Expand Down
31 changes: 29 additions & 2 deletions bayes_opt/util.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
from __future__ import print_function
from __future__ import division
import warnings
import numpy as np
from scipy.stats import norm
Expand Down Expand Up @@ -129,6 +127,35 @@ def _poi(x, gp, y_max, xi):
return norm.cdf(z)


def load_logs(optimizer, logs):
    """Load previously logged observations into an optimizer.

    Each log file is expected to contain one JSON object per line with
    ``"params"`` and ``"target"`` keys (the format written by ``JSONLogger``).

    Parameters
    ----------
    optimizer : BayesianOptimization
        Optimizer whose target space the observations are registered into.
    logs : str or iterable of str
        A single path or a collection of paths to JSON log files.

    Returns
    -------
    The same ``optimizer`` instance, to allow chaining.
    """
    import json

    if isinstance(logs, str):
        logs = [logs]

    for log in logs:
        with open(log, "r") as j:
            # File objects iterate line-by-line; no manual next()/StopIteration
            # loop is needed.
            for line in j:
                iteration = json.loads(line)
                try:
                    # BUGFIX: register() takes the point as `params`
                    # (see BayesianOptimization.register); the old keyword
                    # `x=` raised a TypeError on every loaded observation.
                    optimizer.register(
                        params=iteration["params"],
                        target=iteration["target"],
                    )
                except KeyError:
                    # Presumably raised for duplicate points already in the
                    # target space; skipped as a deliberate best-effort load.
                    pass

    return optimizer


def unique_rows(a):
"""
A function to trim repeated rows that may appear when optimizing.
Expand Down
Loading

0 comments on commit c6a754e

Please sign in to comment.