diff --git a/docs/notebooks/active_learning.pct.py b/docs/notebooks/active_learning.pct.py
index c12b74ea72..89e7163728 100644
--- a/docs/notebooks/active_learning.pct.py
+++ b/docs/notebooks/active_learning.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # Active Learning
+# # Active learning
 
 # %% [markdown]
 # Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste.
diff --git a/docs/notebooks/active_learning_for_binary_classification.pct.py b/docs/notebooks/active_learning_for_binary_classification.pct.py
index d35609e195..58385ed404 100644
--- a/docs/notebooks/active_learning_for_binary_classification.pct.py
+++ b/docs/notebooks/active_learning_for_binary_classification.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # Active Learning for binary classification
+# # Active learning for binary classification
 
 # %%
 import gpflow
diff --git a/docs/notebooks/ask_tell_optimization.pct.py b/docs/notebooks/ask_tell_optimization.pct.py
index e089e8fc07..c4a255286f 100644
--- a/docs/notebooks/ask_tell_optimization.pct.py
+++ b/docs/notebooks/ask_tell_optimization.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # Ask-Tell Optimization Interface
+# # Ask-Tell optimization interface
 
 # %% [markdown]
 # In this notebook we will illustrate the use of an Ask-Tell interface in Trieste. It is useful for cases where you want to have greater control of the optimization loop, or when letting Trieste manage this loop is impossible.
diff --git a/docs/notebooks/batch_optimization.pct.py b/docs/notebooks/batch_optimization.pct.py
index 41bacc56df..6adfe0732c 100644
--- a/docs/notebooks/batch_optimization.pct.py
+++ b/docs/notebooks/batch_optimization.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # Batch Bayesian Optimization
+# # Batch Bayesian optimization
 
 # %% [markdown]
 # Sometimes it is practically convenient to query several points at a time. This notebook demonstrates four ways to perfom batch Bayesian optimization with Trieste.
diff --git a/docs/notebooks/expected_improvement.pct.py b/docs/notebooks/expected_improvement.pct.py
index ecb55a32d2..1bd903dc0d 100644
--- a/docs/notebooks/expected_improvement.pct.py
+++ b/docs/notebooks/expected_improvement.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # Introduction to Bayesian Optimization
+# # Introduction to Bayesian optimization
 
 # %%
 import numpy as np
diff --git a/docs/notebooks/multifidelity_modelling.pct.py b/docs/notebooks/multifidelity_modelling.pct.py
index 30d475280c..0858443c1d 100644
--- a/docs/notebooks/multifidelity_modelling.pct.py
+++ b/docs/notebooks/multifidelity_modelling.pct.py
@@ -16,7 +16,7 @@ import gpflow.kernels
 
 # %% [markdown]
-# # Multifidelity Modelling
+# # Multifidelity modelling
 #
 # This tutorial demonstrates the usage of the `MultifidelityAutoregressive` model for fitting multifidelity data. This is an implementation of the AR1 model initially described in .
diff --git a/docs/notebooks/rembo.pct.py b/docs/notebooks/rembo.pct.py
index 82f2c4ea45..b695fff07b 100644
--- a/docs/notebooks/rembo.pct.py
+++ b/docs/notebooks/rembo.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # High-dimensional Bayesian Optimization
+# # High-dimensional Bayesian optimization
 # This notebook demonstrates a simple method for optimizing a high-dimensional (100-D) problem, where standard BO methods have trouble.
 
 # %%
diff --git a/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py b/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py
index 765bdc9519..079dbd0912 100644
--- a/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py
+++ b/docs/notebooks/scalable_thompson_sampling_using_sparse_gaussian_processes.pct.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # %% [markdown]
-# # Scalable Thompson Sampling
+# # Scalable Thompson sampling
 
 # %% [markdown]
 # In our other [Thompson sampling notebook](thompson_sampling.pct.py) we demonstrate how to perform batch optimization using a traditional implementation of Thompson sampling that samples exactly from an underlying Gaussian Process surrogate model. Unfortunately, this approach incurs a large computational overhead that scales polynomially with the optimization budget and so cannot be applied to settings with larger optimization budgets, e.g. those where large batches (>>10) of points can be collected.
diff --git a/docs/notebooks/thompson_sampling.pct.py b/docs/notebooks/thompson_sampling.pct.py
index dc6c6e3f62..1d1f7320f5 100644
--- a/docs/notebooks/thompson_sampling.pct.py
+++ b/docs/notebooks/thompson_sampling.pct.py
@@ -1,5 +1,5 @@
 # %% [markdown]
-# # Thompson Sampling
+# # Thompson sampling
 
 # %%
 import numpy as np
diff --git a/docs/tutorials.rst b/docs/tutorials.rst
index dd69ed4f35..72cfee53be 100644
--- a/docs/tutorials.rst
+++ b/docs/tutorials.rst
@@ -15,10 +15,10 @@ Tutorials
 =========
 
-Example optimization problems
------------------------------
+Optimization problems
+---------------------
 
-The following tutorials explore various types of optimization problems using Trieste.
+The following tutorials illustrate solving different types of optimization problems using Trieste.
 
 .. toctree::
    :maxdepth: 1
@@ -44,7 +44,7 @@ The following tutorials explore various types of optimization problems using Tri
 Frequently asked questions
 --------------------------
 
-The following tutorials (or sections thereof) explain how to use and extend specific Trieste functionality.
+The following tutorials explain how to use and extend specific Trieste functionality.
 
 * :doc:`How do I set up a basic Bayesian optimization routine?<expected_improvement>`
 * :doc:`How do I set up a batch Bayesian optimization routine?<batch_optimization>`