diff --git a/README.md b/README.md
index 005b18f6..da2f585e 100644
--- a/README.md
+++ b/README.md
@@ -45,6 +45,10 @@ If you are new to choice modelling, you can check this [resource](https://www.pu
- Ready-To-Use datasets:
- [SwissMetro](./choice_learn/datasets/data/swissmetro.csv.gz) from Bierlaire et al. (2001) [[2]](#citation)
- [ModeCanada](./choice_learn/datasets/data/ModeCanada.csv.gz) from Koppelman et al. (1993) [[3]](#citation)
+ - The Train dataset from Ben-Akiva et al. (1993) [[5]](#citation)
+ - The Heating & Electricity datasets from Kenneth Train described [here](https://rdrr.io/cran/mlogit/man/Electricity.html) and [here](https://rdrr.io/cran/mlogit/man/Heating.html)
+ - The TaFeng dataset from [Kaggle](https://www.kaggle.com/datasets/chiranjivdas09/ta-feng-grocery-dataset)
+
### Models
- Ready-to-use models:
@@ -91,10 +95,12 @@ Choice-Learn requires the following:
- Python (>=3.8)
- NumPy (>=1.24)
- pandas (>=1.5)
+
For modelling you need:
- TensorFlow (>=2.13)
-Finally, an optional requirement used for report and LBFG-s use is:
-- tensorflow_probability (>=0.20.1)
+
+Finally, an optional requirement used for reports and L-BFGS optimization is:
+- TensorFlow Probability (>=0.20.1)
## Usage
```python
@@ -151,6 +157,7 @@ A detailed documentation of this project is available [here](https://artefactory
[2][The Acceptance of Model Innovation: The Case of Swissmetro](https://www.researchgate.net/publication/37456549_The_acceptance_of_modal_innovation_The_case_of_Swissmetro), Bierlaire, M.; Axhausen, K., W.; Abay, G. (2001)\
[3][Applications and Interpretation of Nested Logit Models of Intercity Mode Choice](https://trid.trb.org/view/385097), Forinash, C., V.; Koppelman, F., S. (1993)\
[4][The Demand for Local Telephone Service: A Fully Discrete Model of Residential Calling Patterns and Service Choices](https://www.jstor.org/stable/2555538), Train K., E.; McFadden, D., L.; Moshe, B. (1987)\
+[5][Estimation of Travel Choice Models with Randomly Distributed Values of Time](https://ideas.repec.org/p/fth/lavaen/9303.html), Ben-Akiva, M.; Bolduc, D.; Bradley, M. (1993)
### Code and Repositories
- [1][RUMnet](https://github.com/antoinedesir/rumnet)
diff --git a/choice_learn/data/choice_dataset.py b/choice_learn/data/choice_dataset.py
index 56448da1..9984a215 100644
--- a/choice_learn/data/choice_dataset.py
+++ b/choice_learn/data/choice_dataset.py
@@ -614,7 +614,7 @@ def __len__(self):
"""
return len(self.choices)
- def get_num_items(self):
+ def get_n_items(self):
"""Method to access the total number of different items.
Returns:
@@ -624,7 +624,7 @@ def get_num_items(self):
"""
return self.base_num_items
- def get_num_choices(self):
+ def get_n_choices(self):
"""Method to access the total number of different choices.
Redundant with __len__ method.
@@ -689,7 +689,7 @@ def _contexts_items_features_df_to_np(
sess_df.columns = sess_df.loc[items_id_column]
if features is not None:
contexts_items_features.append(sess_df[items_index].loc[features].T.values)
- contexts_items_availabilities.append(np.ones(len(items_index)))
+ contexts_items_availabilities.append(np.ones(len(items_index)).astype("float32"))
else:
sess_feats = []
sess_av = []
@@ -806,9 +806,15 @@ def from_single_wide_df(
else:
contexts_items_availabilities = None
- choices = df[choices_column]
+ choices = df[choices_column].to_numpy()
+        # (debug print removed)
if choice_mode == "items_id":
+ if items_id is None:
+ raise ValueError("items_id must be given to use choice_mode 'items_id'")
+ items_id = np.array(items_id)
choices = np.squeeze([np.where(items_id == c)[0] for c in choices])
+ if choices.shape[0] == 0:
+ raise ValueError("No choice found in the items_id list")
return ChoiceDataset(
fixed_items_features=fixed_items_features,
@@ -940,7 +946,7 @@ def summary(self):
print("%=====================================================================%")
print("%%% Summary of the dataset:")
print("%=====================================================================%")
- print("Number of items:", self.get_num_items())
+ print("Number of items:", self.get_n_items())
print(
"Number of choices:",
len(self),
@@ -1038,7 +1044,9 @@ def get_choices_batch(self, choices_indexes, features=None):
)
if self.contexts_items_availabilities is None:
- contexts_items_availabilities = np.ones((len(choices_indexes), self.base_num_items))
+ contexts_items_availabilities = np.ones(
+ (len(choices_indexes), self.base_num_items)
+ ).astype("float32")
else:
contexts_items_availabilities = self.contexts_items_availabilities[choices_indexes]
# .astype(self._return_types[3])
@@ -1179,7 +1187,7 @@ def get_choices_batch(self, choices_indexes, features=None):
)
if self.contexts_items_availabilities is None:
- contexts_items_availabilities = np.ones((self.base_num_items))
+ contexts_items_availabilities = np.ones((self.base_num_items)).astype("float32")
else:
contexts_items_availabilities = self.contexts_items_availabilities[choices_indexes]
@@ -1299,41 +1307,67 @@ def __getitem__(self, choices_indexes):
elif isinstance(choices_indexes, slice):
return self.__getitem__(list(range(*choices_indexes.indices(len(self.choices)))))
- if self.fixed_items_features[0] is None:
- fixed_items_features = None
- else:
+ try:
+ if self.fixed_items_features[0] is None:
+ fixed_items_features = None
+ else:
+ fixed_items_features = self.fixed_items_features
+ except TypeError:
fixed_items_features = self.fixed_items_features
- if self.contexts_features[0] is None:
+
+ try:
+ if self.contexts_features[0] is None:
+ contexts_features = None
+ else:
+ contexts_features = tuple(
+ self.contexts_features[i][choices_indexes]
+ for i in range(len(self.contexts_features))
+ )
+ except TypeError:
contexts_features = None
- else:
- contexts_features = tuple(
- self.contexts_features[i][choices_indexes]
- for i in range(len(self.contexts_features))
- )
- if self.contexts_items_features[0] is None:
+
+ try:
+ if self.contexts_items_features[0] is None:
+ contexts_items_features = None
+ else:
+ contexts_items_features = tuple(
+ self.contexts_items_features[i][choices_indexes]
+ for i in range(len(self.contexts_items_features))
+ )
+ except TypeError:
contexts_items_features = None
- else:
- contexts_items_features = tuple(
- self.contexts_items_features[i][choices_indexes]
- for i in range(len(self.contexts_items_features))
- )
- if self.fixed_items_features_names[0] is None:
+
+ try:
+ if self.fixed_items_features_names[0] is None:
+ fixed_items_features_names = None
+ else:
+ fixed_items_features_names = self.fixed_items_features_names
+ except TypeError:
fixed_items_features_names = None
- else:
- fixed_items_features_names = self.fixed_items_features_names
- if self.contexts_features_names[0] is None:
+ try:
+ if self.contexts_features_names[0] is None:
+ contexts_features_names = None
+ else:
+ contexts_features_names = self.contexts_features_names
+ except TypeError:
contexts_features_names = None
- else:
- contexts_features_names = self.contexts_features_names
- if self.contexts_items_features_names[0] is None:
+ try:
+ if self.contexts_items_features_names[0] is None:
+ contexts_items_features_names = None
+ else:
+ contexts_items_features_names = self.contexts_items_features_names
+ except TypeError:
contexts_items_features_names = None
- else:
- contexts_items_features_names = self.contexts_items_features_names
+
+ try:
+ contexts_items_availabilities = self.contexts_items_availabilities[choices_indexes]
+ except TypeError:
+ contexts_items_availabilities = None
return ChoiceDataset(
fixed_items_features=fixed_items_features,
contexts_features=contexts_features,
contexts_items_features=contexts_items_features,
- contexts_items_availabilities=self.contexts_items_availabilities[choices_indexes],
+ contexts_items_availabilities=contexts_items_availabilities,
choices=[self.choices[i] for i in choices_indexes],
fixed_items_features_names=fixed_items_features_names,
contexts_features_names=contexts_features_names,
@@ -1391,8 +1425,53 @@ def filter(self, bool_list):
Parameters
----------
bool_list : list of boolean
- list of booleans of length self.get_num_sessions() to filter sessions.
+ list of booleans of length self.get_n_contexts() to filter contexts.
True to keep, False to discard.
"""
indexes = [i for i, keep in enumerate(bool_list) if keep]
return self[indexes]
+
+ def get_n_fixed_items_features(self):
+ """Method to access the number of fixed items features.
+
+ Returns:
+ -------
+ int
+ number of fixed items features
+ """
+ if self.fixed_items_features is not None:
+ n_features = 0
+ for fixed_features in self.fixed_items_features:
+ n_features += fixed_features.shape[1]
+ return n_features
+ return 0
+
+ def get_n_contexts_features(self):
+ """Method to access the number of contexts features.
+
+ Returns:
+ -------
+ int
+            number of contexts features
+ """
+ if self.contexts_features is not None:
+ n_features = 0
+ for context_features in self.contexts_features:
+ n_features += context_features.shape[1]
+ return n_features
+ return 0
+
+ def get_n_contexts_items_features(self):
+ """Method to access the number of context items features.
+
+ Returns:
+ -------
+ int
+            number of contexts items features
+ """
+ if self.contexts_items_features is not None:
+ n_features = 0
+ for contexts_items_features in self.contexts_items_features:
+ n_features += contexts_items_features.shape[2]
+ return n_features
+ return 0
diff --git a/choice_learn/data/indexer.py b/choice_learn/data/indexer.py
index ddba5383..29ed0e2d 100644
--- a/choice_learn/data/indexer.py
+++ b/choice_learn/data/indexer.py
@@ -295,7 +295,7 @@ def __getitem__(self, choices_indexes):
if self.choice_dataset.contexts_items_availabilities is None:
contexts_items_availabilities = np.ones(
(len(choices_indexes), self.choice_dataset.base_num_items)
- )
+ ).astype("float32")
else:
if hasattr(self.choice_dataset.contexts_items_availabilities, "batch"):
contexts_items_availabilities = (
@@ -440,7 +440,9 @@ def __getitem__(self, choices_indexes):
choice = self.choice_dataset.choices[choices_indexes]
if self.choice_dataset.contexts_items_availabilities is None:
- contexts_items_availabilities = np.ones((self.choice_dataset.base_num_items))
+ contexts_items_availabilities = np.ones(
+ (self.choice_dataset.base_num_items)
+ ).astype("float32")
else:
contexts_items_availabilities = self.choice_dataset.contexts_items_availabilities[
choices_indexes
diff --git a/choice_learn/datasets/__init__.py b/choice_learn/datasets/__init__.py
index a16bf199..8a7ec3f4 100644
--- a/choice_learn/datasets/__init__.py
+++ b/choice_learn/datasets/__init__.py
@@ -1,8 +1,5 @@
"""Init file for datasets module."""
-from .base import load_modecanada, load_swissmetro
+from .base import load_electricity, load_heating, load_modecanada, load_swissmetro
-__all__ = [
- "load_modecanada",
- "load_swissmetro",
-]
+__all__ = ["load_modecanada", "load_swissmetro", "load_electricity", "load_heating"]
diff --git a/choice_learn/datasets/base.py b/choice_learn/datasets/base.py
index 92b65eab..6c977f55 100644
--- a/choice_learn/datasets/base.py
+++ b/choice_learn/datasets/base.py
@@ -367,3 +367,163 @@ def load_modecanada(
choices_column=choice_column,
choice_mode="one_zero",
)
+
+
+def load_heating(
+ as_frame=False,
+ to_wide=False,
+):
+ """Load and return the Heating dataset from Kenneth Train.
+
+ Parameters
+ ----------
+ as_frame : bool, optional
+ Whether to return the dataset as pd.DataFrame. If not, returned as ChoiceDataset,
+ by default False.
+ return_desc : bool, optional
+ Whether to return the description, by default False.
+ to_wide : bool, optional
+ Whether to return the dataset in wide format,
+        by default False (and thus returned in long format).
+
+ Returns:
+ --------
+ ChoiceDataset
+ Loaded Heating dataset
+ """
+ _ = to_wide
+ data_file_name = "heating_data.csv.gz"
+ names, data = load_gzip(data_file_name)
+
+ heating_df = pd.read_csv(resources.files(DATA_MODULE) / "heating_data.csv.gz")
+
+ if as_frame:
+ return heating_df
+
+ contexts_features = ["income", "agehed", "rooms", "region"]
+ choice = ["depvar"]
+ contexts_items_features = ["ic.", "oc."]
+ items = ["gc", "gr", "ec", "er", "hp"]
+
+ choices = np.array([items.index(val) for val in heating_df[choice].to_numpy().ravel()])
+ contexts = heating_df[contexts_features].to_numpy()
+ contexts_items = np.stack(
+ [
+ heating_df[[feat + item for feat in contexts_items_features]].to_numpy()
+ for item in items
+ ],
+ axis=1,
+ )
+ return ChoiceDataset(
+ contexts_features=contexts, contexts_items_features=contexts_items, choices=choices
+ )
+
+
+def load_electricity(
+ as_frame=False,
+ to_wide=False,
+):
+ """Load and return the Electricity dataset from Kenneth Train.
+
+ Parameters
+ ----------
+ as_frame : bool, optional
+ Whether to return the dataset as pd.DataFrame. If not, returned as ChoiceDataset,
+ by default False.
+ to_wide : bool, optional
+ Whether to return the dataset in wide format,
+        by default False (and thus returned in long format).
+
+ Returns:
+ --------
+ ChoiceDataset
+ Loaded Electricity dataset
+ """
+ _ = to_wide
+ data_file_name = "electricity.csv.gz"
+ names, data = load_gzip(data_file_name)
+
+ elec_df = pd.read_csv(resources.files(DATA_MODULE) / data_file_name)
+ elec_df.choice = elec_df.choice.astype(int)
+ elec_df[["pf", "cl", "loc", "wk", "tod", "seas"]] = elec_df[
+ ["pf", "cl", "loc", "wk", "tod", "seas"]
+ ].astype(float)
+
+ if as_frame:
+ return elec_df
+
+ return ChoiceDataset.from_single_long_df(
+ df=elec_df,
+ contexts_items_features_columns=["pf", "cl", "loc", "wk", "tod", "seas"],
+ items_id_column="alt",
+ contexts_id_column="chid",
+ choice_mode="one_zero",
+ )
+
+
+def load_train(
+ as_frame=False,
+ to_wide=False,
+ return_desc=False,
+):
+ """Load and return the Train dataset from Koppleman et al. (1993).
+
+ Parameters
+ ----------
+ as_frame : bool, optional
+ Whether to return the dataset as pd.DataFrame. If not, returned as ChoiceDataset,
+ by default False.
+ to_wide : bool, optional
+ Whether to return the dataset in wide format,
+        by default False (and thus returned in long format).
+ return_desc : bool, optional
+ Whether to return the description, by default False.
+
+ Returns:
+ --------
+ ChoiceDataset
+        Loaded Train dataset
+ """
+ desc = "A sample of 235 Dutchindividuals facing 2929 choice situations."
+ desc += """Ben-Akiva M, Bolduc D, Bradley M(1993).
+ “Estimation of Travel Choice Models with Randomly Distributed Values of Time.
+ ”Papers 9303, Laval-Recherche en Energie. https://ideas.repec.org/p/fth/lavaen/9303.html."""
+ _ = to_wide
+ data_file_name = "train_data.csv.gz"
+ names, data = load_gzip(data_file_name)
+
+ train_df = pd.read_csv(resources.files(DATA_MODULE) / data_file_name)
+
+ if return_desc:
+ return desc
+
+ if as_frame:
+ return train_df
+ train_df["choice"] = train_df.apply(lambda row: row.choice[-1], axis=1)
+ train_df = train_df.rename(
+ columns={
+ "price1": "1_price",
+ "time1": "1_time",
+ "change1": "1_change",
+ "comfort1": "1_comfort",
+ }
+ )
+ train_df = train_df.rename(
+ columns={
+ "price2": "2_price",
+ "time2": "2_time",
+ "change2": "2_change",
+ "comfort2": "2_comfort",
+ }
+ )
+    # (debug print removed)
+ return ChoiceDataset.from_single_wide_df(
+ df=train_df,
+ items_id=["1", "2"],
+ fixed_items_suffixes=None,
+ contexts_features_columns=["id"],
+ contexts_items_features_suffixes=["price", "time", "change", "comfort"],
+ contexts_items_availabilities_suffix=None,
+ choices_column="choice",
+ choice_mode="items_id",
+ )
diff --git a/choice_learn/datasets/data/electricity.csv.gz b/choice_learn/datasets/data/electricity.csv.gz
new file mode 100644
index 00000000..da0a7ee1
Binary files /dev/null and b/choice_learn/datasets/data/electricity.csv.gz differ
diff --git a/choice_learn/datasets/data/heating_data.csv.gz b/choice_learn/datasets/data/heating_data.csv.gz
new file mode 100644
index 00000000..5fd53831
Binary files /dev/null and b/choice_learn/datasets/data/heating_data.csv.gz differ
diff --git a/choice_learn/datasets/data/train_data.csv.gz b/choice_learn/datasets/data/train_data.csv.gz
new file mode 100644
index 00000000..540f7d32
Binary files /dev/null and b/choice_learn/datasets/data/train_data.csv.gz differ
diff --git a/choice_learn/models/base_model.py b/choice_learn/models/base_model.py
index 4808324e..c25162cc 100644
--- a/choice_learn/models/base_model.py
+++ b/choice_learn/models/base_model.py
@@ -20,6 +20,7 @@ def __init__(
label_smoothing=0.0,
normalize_non_buy=False,
optimizer="Adam",
+ tolerance=1e-8,
callbacks=None,
lr=0.001,
epochs=1,
@@ -38,6 +39,15 @@ def __init__(
normalization,by default True
callbacks : list of tf.kera callbacks, optional
List of callbacks to add to model.fit, by default None and only add History
+ optimizer : str, optional
+ Name of the tf.keras.optimizers to be used, by default "Adam"
+ tolerance : float, optional
+ Tolerance for the L-BFGS optimizer if applied, by default 1e-8
+ lr: float, optional
+ Learning rate for the optimizer if applied, by default 0.001
+ epochs: int, optional
+ (Max) Number of epochs to train the model, by default 1
+ batch_size: int, optional
"""
self.is_fitted = False
self.normalize_non_buy = normalize_non_buy
@@ -69,6 +79,7 @@ def __init__(
self.epochs = epochs
self.batch_size = batch_size
+ self.tolerance = tolerance
@abstractmethod
def compute_batch_utility(
@@ -346,7 +357,7 @@ def fit(
contexts_items_batch,
availabilities_batch,
choices_batch,
- )[0]
+ )[0]["optimized_loss"]
)
val_logs["val_loss"].append(test_losses[-1])
temps_logs = {k: tf.reduce_mean(v) for k, v in val_logs.items()}
@@ -432,11 +443,18 @@ def batch_predict(
# Compute loss from probabilities & actual choices
# batch_loss = self.loss(probabilities, c_batch, sample_weight=sample_weight)
- batch_loss = self.loss(
- y_pred=probabilities,
- y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
- sample_weight=sample_weight,
- )
+ batch_loss = {
+ "optimized_loss": self.loss(
+ y_pred=probabilities,
+ y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
+ sample_weight=sample_weight,
+ ),
+ "NegativeLogLikelihood": tf.keras.losses.CategoricalCrossentropy()(
+ y_pred=probabilities,
+ y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
+ sample_weight=sample_weight,
+ ),
+ }
return batch_loss, probabilities
def save_model(self, path):
@@ -524,7 +542,7 @@ def predict_probas(self, choice_dataset, batch_size=-1):
return tf.concat(stacked_probabilities, axis=0)
- def evaluate(self, choice_dataset, batch_size=-1):
+ def evaluate(self, choice_dataset, sample_weight=None, batch_size=-1, mode="eval"):
"""Evaluates the model for each context and each product of a ChoiceDataset.
Predicts the probabilities according to the model and computes the Negative-Log-Likelihood
@@ -554,8 +572,12 @@ def evaluate(self, choice_dataset, batch_size=-1):
contexts_items_features=contexts_items_features,
contexts_items_availabilities=contexts_items_availabilities,
choices=choices,
+ sample_weight=sample_weight,
)
- batch_losses.append(loss)
+ if mode == "eval":
+ batch_losses.append(loss["NegativeLogLikelihood"])
+ elif mode == "optim":
+ batch_losses.append(loss["optimized_loss"])
if batch_size != -1:
last_batch_size = contexts_items_availabilities.shape[0]
coefficients = tf.concat(
@@ -567,13 +589,15 @@ def evaluate(self, choice_dataset, batch_size=-1):
batch_loss = tf.reduce_mean(batch_losses)
return batch_loss
- def _lbfgs_train_step(self, dataset):
+ def _lbfgs_train_step(self, dataset, sample_weight=None):
"""A factory to create a function required by tfp.optimizer.lbfgs_minimize.
Parameters
----------
dataset: ChoiceDataset
Dataset on which to estimate the paramters.
+ sample_weight: np.ndarray, optional
+ Sample weights to apply, by default None
Returns:
--------
@@ -636,7 +660,9 @@ def f(params_1d):
# update the parameters in the model
assign_new_model_parameters(params_1d)
# calculate the loss
- loss_value = self.evaluate(dataset, batch_size=-1)
+ loss_value = self.evaluate(
+ dataset, sample_weight=sample_weight, batch_size=-1, mode="optim"
+ )
# calculate gradients and convert to 1D tf.Tensor
grads = tape.gradient(loss_value, self.weights)
@@ -659,7 +685,7 @@ def f(params_1d):
f.history = []
return f
- def _fit_with_lbfgs(self, dataset, epochs=None, tolerance=1e-8):
+ def _fit_with_lbfgs(self, dataset, epochs=None, sample_weight=None, verbose=0):
"""Fit function for L-BFGS optimizer.
Replaces the .fit method when the optimizer is set to L-BFGS.
@@ -668,10 +694,12 @@ def _fit_with_lbfgs(self, dataset, epochs=None, tolerance=1e-8):
----------
dataset : ChoiceDataset
Dataset to be used for coefficients estimations
- n_epochs : int
+ epochs : int
Maximum number of epochs allowed to reach minimum
- tolerance : float, optional
- Maximum tolerance accepted, by default 1e-8
+ sample_weight : np.ndarray, optional
+ Sample weights to apply, by default None
+ verbose : int, optional
+ print level, for debugging, by default 0
Returns:
--------
@@ -684,7 +712,7 @@ def _fit_with_lbfgs(self, dataset, epochs=None, tolerance=1e-8):
if epochs is None:
epochs = self.epochs
- func = self._lbfgs_train_step(dataset)
+ func = self._lbfgs_train_step(dataset, sample_weight=sample_weight)
# convert initial model parameters to a 1D tf.Tensor
init_params = tf.dynamic_stitch(func.idx, self.weights)
@@ -694,7 +722,7 @@ def _fit_with_lbfgs(self, dataset, epochs=None, tolerance=1e-8):
value_and_gradients_function=func,
initial_position=init_params,
max_iterations=epochs,
- tolerance=tolerance,
+ tolerance=self.tolerance,
f_absolute_tolerance=-1,
f_relative_tolerance=-1,
)
@@ -702,29 +730,97 @@ def _fit_with_lbfgs(self, dataset, epochs=None, tolerance=1e-8):
# after training, the final optimized parameters are still in results.position
# so we have to manually put them back to the model
func.assign_new_model_parameters(results.position)
- print("L-BFGS Opimization finished:")
- print("---------------------------------------------------------------")
- print("Number of iterations:", results[2].numpy())
- print("Algorithm converged before reaching max iterations:", results[0].numpy())
+ if verbose > 0:
+ print("L-BFGS Opimization finished:")
+ print("---------------------------------------------------------------")
+ print("Number of iterations:", results[2].numpy())
+ print("Algorithm converged before reaching max iterations:", results[0].numpy())
return func.history
-class RandomChoiceModel(ChoiceModel):
- """Dumb model that randomly attributes utilities to products."""
+class BaseLatentClassModel(object): # TODO: should inherit ChoiceModel ?
+ """Base Class to work with Mixtures of models."""
- def __init__(self, **kwargs):
- """Initialization of the model."""
- super().__init__(**kwargs)
+ def __init__(
+ self,
+ n_latent_classes,
+ model_class,
+ model_parameters,
+ fit_method,
+ epochs,
+ optimizer=None,
+ add_exit_choice=False,
+ tolerance=1e-6,
+ lr=0.001,
+ ):
+ """Instantiation of the model mixture.
- def compute_batch_utility(
+ Parameters
+ ----------
+ n_latent_classes : int
+ Number of latent classes
+ model_class : BaseModel
+ class of models to get a mixture of
+ model_parameters : dict
+ hyper-parameters of the models
+ fit_method : str
+ Method to estimate the parameters: "EM", "MLE".
+ epochs : int
+ Number of epochs to train the model.
+ optimizer: str, optional
+ Name of the tf.keras.optimizers to be used if one is used, by default None
+ add_exit_choice : bool, optional
+ Whether or not to add an exit choice, by default False
+ tolerance: float, optional
+ Tolerance for the L-BFGS optimizer if applied, by default 1e-6
+ lr: float, optional
+ Learning rate for the optimizer if applied, by default 0.001
+ """
+ self.n_latent_classes = n_latent_classes
+ if isinstance(model_parameters, list):
+ if not len(model_parameters) == n_latent_classes:
+ raise ValueError(
+ """If you specify a list of hyper-parameters, it means that you want to use\
+ different hyper-parameters for each latent class. In this case, the length\
+ of the list must be equal to the number of latent classes."""
+ )
+ self.model_parameters = model_parameters
+ else:
+ self.model_parameters = [model_parameters] * n_latent_classes
+ self.model_class = model_class
+ self.fit_method = fit_method
+
+ self.epochs = epochs
+ self.add_exit_choice = add_exit_choice
+ self.tolerance = tolerance
+ self.optimizer = optimizer
+ self.lr = lr
+
+ self.loss = tf_ops.CustomCategoricalCrossEntropy(from_logits=False, label_smoothing=0)
+ self.instantiated = False
+
+ def instantiate(self, **kwargs):
+ """Instantiation."""
+ init_logit = tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(self.n_latent_classes - 1,)),
+ name="Latent-Logits",
+ )
+ self.latent_logits = init_logit
+ self.models = [self.model_class(**mp) for mp in self.model_parameters]
+ for model in self.models:
+ model.instantiate(**kwargs)
+
+ # @tf.function
+ def batch_predict(
self,
fixed_items_features,
contexts_features,
contexts_items_features,
contexts_items_availabilities,
choices,
+ sample_weight=None,
):
- """Computes the random utility for each product of each context.
+ """Function that represents one prediction (Probas + Loss) for one batch of a ChoiceDataset.
Parameters
----------
@@ -744,42 +840,58 @@ def compute_batch_utility(
choices_batch : np.ndarray
Choices
Shape must be (n_contexts, )
+ sample_weight : np.ndarray, optional
+ List samples weights to apply during the gradient descent to the batch elements,
+ by default None
Returns:
--------
- tf.Tensor
- (n_contexts, n_items) matrix of random utilities
+ tf.Tensor (1, )
+ Value of NegativeLogLikelihood loss for the batch
+ tf.Tensor (batch_size, n_items)
+ Probabilities for each product to be chosen for each context
"""
- # In order to avoid unused arguments warnings
- _ = fixed_items_features, contexts_features, contexts_items_availabilities, choices
- return np.squeeze(
- np.random.uniform(shape=(contexts_items_features.shape), minval=0, maxval=1)
+ # Compute utilities from features
+ utilities = self.compute_batch_utility(
+ fixed_items_features,
+ contexts_features,
+ contexts_items_features,
+ contexts_items_availabilities,
+ choices,
)
- def fit(**kwargs):
- """Make sure that nothing happens during .fit."""
- _ = kwargs
- return {}
-
-
-class DistribMimickingModel(ChoiceModel):
- """Dumb class model that mimicks the probabilities.
-
- It stores the encountered in the train datasets and always returns them
- """
-
- def __init__(self, **kwargs):
- """Initialization of the model."""
- super().__init__(**kwargs)
- self.weights = []
+ latent_probabilities = tf.concat(
+ [[tf.constant(1.0)], tf.math.exp(self.latent_logits)], axis=0
+ )
+ latent_probabilities = latent_probabilities / tf.reduce_sum(latent_probabilities)
+ # Compute probabilities from utilities & availabilties
+ probabilities = []
+ for i, class_utilities in enumerate(utilities):
+ class_probabilities = tf_ops.softmax_with_availabilities(
+ contexts_items_logits=class_utilities,
+ contexts_items_availabilities=contexts_items_availabilities,
+ normalize_exit=self.add_exit_choice,
+ axis=-1,
+ )
+ probabilities.append(class_probabilities * latent_probabilities[i])
+ # Summing over the latent classes
+ probabilities = tf.reduce_sum(probabilities, axis=0)
- def fit(self, choice_dataset, **kwargs):
- """Computes the choice frequency of each product and defines it as choice probabilities."""
- _ = kwargs
- choices = choice_dataset.choices
- for i in range(choice_dataset.get_num_items()):
- self.weights.append(tf.reduce_sum(tf.cast(choices == i, tf.float32)))
- self.weights = tf.stack(self.weights) / len(choices)
+ # Compute loss from probabilities & actual choices
+ # batch_loss = self.loss(probabilities, c_batch, sample_weight=sample_weight)
+ batch_loss = {
+ "optimized_loss": self.loss(
+ y_pred=probabilities,
+ y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
+ sample_weight=sample_weight,
+ ),
+ "NegativeLogLikelihood": tf.keras.losses.CategoricalCrossentropy()(
+ y_pred=probabilities,
+ y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
+ sample_weight=sample_weight,
+ ),
+ }
+ return batch_loss, probabilities
def compute_batch_utility(
self,
@@ -789,7 +901,9 @@ def compute_batch_utility(
contexts_items_availabilities,
choices,
):
- """Returns utility that is fixed. U = log(P).
+ """Latent class computation of utility.
+
+ It computes the utility for each of the latent models and stores them in a list.
Parameters
----------
@@ -810,19 +924,425 @@ def compute_batch_utility(
Choices
Shape must be (n_contexts, )
+ Returns:
+ --------
+ list of np.ndarray
+ List of:
+ Utility of each product for each context.
+ Shape must be (n_contexts, n_items)
+ for each of the latent models.
+ """
+ utilities = []
+ # Iterates over latent models
+ for model in self.models:
+ model_utilities = model.compute_batch_utility(
+ fixed_items_features=fixed_items_features,
+ contexts_features=contexts_features,
+ contexts_items_features=contexts_items_features,
+ contexts_items_availabilities=contexts_items_availabilities,
+ choices=choices,
+ )
+ utilities.append(model_utilities)
+ return utilities
+
+ def fit(self, dataset, sample_weight=None, verbose=0):
+ """Fit the model on a ChoiceDataset.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ Dataset to be used for coefficients estimations
+ sample_weight : np.ndarray, optional
+ sample weights to apply, by default None
+ verbose : int, optional
+ print level, for debugging, by default 0
+
+ Returns:
+ --------
+ dict
+ Fit history
+ """
+ if self.fit_method.lower() == "em":
+ self.minf = np.log(1e-3)
+ print("Expectation-Maximization estimation algorithm not well implemented yet.")
+ return self._em_fit(dataset=dataset, sample_weight=sample_weight, verbose=verbose)
+
+ if self.fit_method.lower() == "mle":
+ if self.optimizer.lower() == "lbfgs" or self.optimizer.lower() == "l-bfgs":
+ return self._fit_with_lbfgs(
+ dataset=dataset, sample_weight=sample_weight, verbose=verbose
+ )
+
+ return self._fit_normal(dataset=dataset, sample_weight=sample_weight, verbose=verbose)
+
+ raise ValueError(f"Fit method not implemented: {self.fit_method}")
+
+ def evaluate(self, choice_dataset, sample_weight=None, batch_size=-1, mode="eval"):
+ """Evaluates the model for each context and each product of a ChoiceDataset.
+
+ Predicts the probabilities according to the model and computes the Negative-Log-Likelihood
+ loss from the actual choices.
+
+ Parameters
+ ----------
+ choice_dataset : ChoiceDataset
+ Dataset on which to apply to prediction
+
Returns:
--------
np.ndarray (n_contexts, n_items)
- Utilities
+ Choice probabilities for each context and each product
+ """
+ batch_losses = []
+ for (
+ fixed_items_features,
+ contexts_features,
+ contexts_items_features,
+ contexts_items_availabilities,
+ choices,
+ ) in choice_dataset.iter_batch(batch_size=batch_size):
+ loss, _ = self.batch_predict(
+ fixed_items_features=fixed_items_features,
+ contexts_features=contexts_features,
+ contexts_items_features=contexts_items_features,
+ contexts_items_availabilities=contexts_items_availabilities,
+ choices=choices,
+ sample_weight=sample_weight,
+ )
+ if mode == "eval":
+ batch_losses.append(loss["NegativeLogLikelihood"])
+ elif mode == "optim":
+ batch_losses.append(loss["optimized_loss"])
+ if batch_size != -1:
+ last_batch_size = contexts_items_availabilities.shape[0]
+ coefficients = tf.concat(
+ [tf.ones(len(batch_losses) - 1) * batch_size, [last_batch_size]], axis=0
+ )
+ batch_losses = tf.multiply(batch_losses, coefficients)
+ batch_loss = tf.reduce_sum(batch_losses) / len(choice_dataset)
+ else:
+ batch_loss = tf.reduce_mean(batch_losses)
+ return batch_loss
+
+ def _lbfgs_train_step(self, dataset, sample_weight=None):
+ """A factory to create a function required by tfp.optimizer.lbfgs_minimize.
+
+ Parameters
+ ----------
+ dataset: ChoiceDataset
Dataset on which to estimate the parameters.
+ sample_weight: np.ndarray, optional
+ Sample weights to apply, by default None
+
+ Returns:
+ --------
+ function
+ with the signature:
+ loss_value, gradients = f(model_parameters).
+ """
+ # obtain the shapes of all trainable parameters in the model
+ weights = []
+ w_to_model = []
+ w_to_model_indexes = []
+ for i, model in enumerate(self.models):
+ for j, w in enumerate(model.weights):
+ weights.append(w)
+ w_to_model.append(i)
+ w_to_model_indexes.append(j)
+ weights.append(self.latent_logits)
+ w_to_model.append(-1)
+ w_to_model_indexes.append(-1)
+ shapes = tf.shape_n(weights)
+ n_tensors = len(shapes)
+
+ # we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to
+ # prepare required information first
+ count = 0
+ idx = [] # stitch indices
+ part = [] # partition indices
+
+ for i, shape in enumerate(shapes):
+ n = np.product(shape)
+ idx.append(tf.reshape(tf.range(count, count + n, dtype=tf.int32), shape))
+ part.extend([i] * n)
+ count += n
+
+ part = tf.constant(part)
+
+ @tf.function
+ def assign_new_model_parameters(params_1d):
+ """A function updating the model's parameters with a 1D tf.Tensor.
- Raises:
- -------
- ValueError
- If the model has not been fitted cannot evaluate the utility
+ Parameters
+ ----------
+ params_1d: tf.Tensor
+ a 1D tf.Tensor representing the model's trainable parameters.
+ """
+ params = tf.dynamic_partition(params_1d, part, n_tensors)
+ for i, (shape, param) in enumerate(zip(shapes, params)):
+ if w_to_model[i] != -1:
+ self.models[w_to_model[i]].weights[w_to_model_indexes[i]].assign(
+ tf.reshape(param, shape)
+ )
+ else:
+ self.latent_logits.assign(tf.reshape(param, shape))
+
+ # now create a function that will be returned by this factory
+ @tf.function
+ def f(params_1d):
+ """A function that can be used by tfp.optimizer.lbfgs_minimize.
+
+ This function is created by function_factory.
+
+ Parameters
+ ----------
+ params_1d: tf.Tensor
+ a 1D tf.Tensor.
+
+ Returns:
+ --------
+ tf.Tensor
+ A scalar loss and the gradients w.r.t. the `params_1d`.
+ tf.Tensor
+ A 1D tf.Tensor representing the gradients w.r.t. the `params_1d`.
+ """
+ # use GradientTape so that we can calculate the gradient of loss w.r.t. parameters
+ with tf.GradientTape() as tape:
+ # update the parameters in the model
+ assign_new_model_parameters(params_1d)
+ # calculate the loss
+ loss_value = self.evaluate(
+ dataset, sample_weight=sample_weight, batch_size=-1, mode="optim"
+ )
+ # calculate gradients and convert to 1D tf.Tensor
+ grads = tape.gradient(loss_value, weights)
+ grads = tf.dynamic_stitch(idx, grads)
+
+ # print out iteration & loss
+ f.iter.assign_add(1)
+
+ # store loss value so we can retrieve later
+ tf.py_function(f.history.append, inp=[loss_value], Tout=[])
+
+ return loss_value, grads
+
+ # store these information as members so we can use them outside the scope
+ f.iter = tf.Variable(0)
+ f.idx = idx
+ f.part = part
+ f.shapes = shapes
+ f.assign_new_model_parameters = assign_new_model_parameters
+ f.history = []
+ return f
+
+ def _fit_with_lbfgs(self, dataset, epochs=None, sample_weight=None, verbose=0):
+ """Fit function for L-BFGS optimizer.
+
+ Replaces the .fit method when the optimizer is set to L-BFGS.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ Dataset to be used for coefficients estimations
+ epochs : int
+ Maximum number of epochs allowed to reach minimum
+ sample_weight : np.ndarray, optional
+ Sample weights to apply, by default None
+ verbose : int, optional
+ print level, for debugging, by default 0
+
+ Returns:
+ --------
+ dict
+ Fit history
+ """
+ # Only import tensorflow_probability if LBFGS optimizer is used, avoid unnecessary
+ # dependency
+ import tensorflow_probability as tfp
+
+ if epochs is None:
+ epochs = self.epochs
+ func = self._lbfgs_train_step(dataset, sample_weight=sample_weight)
+
+ # convert initial model parameters to a 1D tf.Tensor
+ init = []
+ for model in self.models:
+ for w in model.weights:
+ init.append(w)
+ init.append(self.latent_logits)
+ init_params = tf.dynamic_stitch(func.idx, init)
+
+ # train the model with L-BFGS solver
+ results = tfp.optimizer.lbfgs_minimize(
+ value_and_gradients_function=func,
+ initial_position=init_params,
+ max_iterations=epochs,
+ tolerance=-1,
+ f_absolute_tolerance=self.tolerance,
+ f_relative_tolerance=-1,
+ x_tolerance=-1,
+ )
+
+ # after training, the final optimized parameters are still in results.position
+ # so we have to manually put them back to the model
+ func.assign_new_model_parameters(results.position)
+ if verbose > 0:
+ print("L-BFGS Opimization finished:")
+ print("---------------------------------------------------------------")
+ print("Number of iterations:", results[2].numpy())
+ print("Algorithm converged before reaching max iterations:", results[0].numpy())
+ return func.history
+
+ def _gd_train_step(self, dataset, sample_weight=None):
+ pass
+
+ def _nothing(self, inputs):
+ """Compute latent-class-weighted choice probabilities for a batch of inputs.
+
+ Parameters
+ ----------
+ inputs : tuple
+ Batch of dataset features; inputs[4] holds the items availabilities.
+
+ Returns:
+ --------
+ tf.Tensor
+ Choice probabilities of shape (n_contexts, n_items), summed over latent classes.
+ """
+ latent_probas = tf.clip_by_value(
+ self.latent_logits - tf.reduce_max(self.latent_logits), self.minf, 0
+ )
+ latent_probas = tf.math.exp(latent_probas)
+ # latent_probas = tf.math.abs(self.logit_latent_probas) # alternative implementation
+ latent_probas = latent_probas / tf.reduce_sum(latent_probas)
+ proba_list = []
+ avail = inputs[4]
+ for q in range(self.n_latent_classes):
+ combined = self.models[q].compute_batch_utility(*inputs)
+ combined = tf.clip_by_value(
+ combined - tf.reduce_max(combined, axis=1, keepdims=True), self.minf, 0
+ )
+ combined = tf.keras.layers.Activation(activation=tf.nn.softmax)(combined)
+ # combined = tf.keras.layers.Softmax()(combined)
+ combined = combined * avail
+ combined = latent_probas[q] * tf.math.divide(
+ combined, tf.reduce_sum(combined, axis=1, keepdims=True)
+ )
+ combined = tf.expand_dims(combined, -1)
+ proba_list.append(combined)
+ # print(combined.get_shape()) # it is useful to print the shape of tensors for debugging
+
+ proba_final = tf.keras.layers.Concatenate(axis=2)(proba_list)
+ return tf.math.reduce_sum(proba_final, axis=2, keepdims=False)
+
+ def _expectation(self, dataset):
+ predicted_probas = [model.predict_probas(dataset) for model in self.models]
+ if np.sum(np.isnan(predicted_probas)) > 0:
+ print("Nan in probas")
+ predicted_probas = [
+ latent
+ * tf.gather_nd(
+ params=proba,
+ indices=tf.stack([tf.range(0, len(dataset), 1), dataset.choices], axis=1),
+ )
+ for latent, proba in zip(self.latent_logits, predicted_probas)
+ ]
+
+ # E-step
+ ###### FILL THE CODE BELOW TO DETERMINE THE WEIGHTS (weights = xxx)
+ predicted_probas = np.stack(predicted_probas, axis=1) + 1e-10
+ loss = np.sum(np.log(np.sum(predicted_probas, axis=1)))
+
+ return predicted_probas / np.sum(predicted_probas, axis=1, keepdims=True), loss
+
+ def _maximization(self, dataset, verbose=0):
+ """Maximization step of the EM algorithm.
+
+ Refits each latent-class model with the E-step weights and updates the
+ latent class probabilities.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ Dataset used for the weighted fit of each latent-class model.
+ verbose : int, optional
+ print level, for debugging, by default 0
+
+ Returns:
+ --------
+ np.ndarray
+ Updated latent class probabilities, normalized to sum to one.
+ """
+ self.models = [self.model_class(**mp) for mp in self.model_parameters]
+ # M-step: MNL estimation
+ for q in range(self.n_latent_classes):
+ self.models[q].fit(dataset, sample_weight=self.weights[:, q], verbose=verbose)
+
+ # M-step: latent probability estimation
+ latent_probas = np.sum(self.weights, axis=0)
+
+ return latent_probas / np.sum(latent_probas)
+
+ def _em_fit(self, dataset, verbose=0):
+ """Fit with Expectation-Maximization Algorithm.
+
+ Parameters
+ ----------
+ dataset: ChoiceDataset
+ Dataset to be used for coefficients estimations
+ verbose : int, optional
+ print level, for debugging, by default 0
+
+ Returns:
+ --------
+ list
+ List of logits for each latent class
+ list
+ List of losses at each epoch
+ """
+ hist_logits = []
+ hist_loss = []
+ # Initialization
+ for model in self.models:
+ # model.instantiate()
+ model.fit(dataset, sample_weight=np.random.rand(len(dataset)), verbose=verbose)
+ for i in tqdm.trange(self.epochs):
+ self.weights, loss = self._expectation(dataset)
+ self.latent_logits = self._maximization(dataset, verbose=verbose)
+ hist_logits.append(self.latent_logits)
+ hist_loss.append(loss)
+ if np.sum(np.isnan(self.latent_logits)) > 0:
+ print("Nan in logits")
+ break
+ return hist_logits, hist_loss
+
+ def predict_probas(self, choice_dataset, batch_size=-1):
+ """Predicts the choice probabilities for each context and each product of a ChoiceDataset.
+
+ Parameters
+ ----------
+ choice_dataset : ChoiceDataset
+ Dataset on which to apply to prediction
+ batch_size : int, optional
+ Batch size to use for the prediction, by default -1
+
+ Returns:
+ --------
+ np.ndarray (n_contexts, n_items)
+ Choice probabilities for each context and each product
"""
- # In order to avoid unused arguments warnings
- _ = fixed_items_features, contexts_features, contexts_items_availabilities
- _ = contexts_items_features
- if self.weights is None:
- raise ValueError("Model not fitted")
- return np.stack([np.log(self.weights.numpy())] * len(choices), axis=0)
+ stacked_probabilities = []
+ for (
+ fixed_items_features,
+ contexts_features,
+ contexts_items_features,
+ contexts_items_availabilities,
+ choices,
+ ) in choice_dataset.iter_batch(batch_size=batch_size):
+ _, probabilities = self.batch_predict(
+ fixed_items_features=fixed_items_features,
+ contexts_features=contexts_features,
+ contexts_items_features=contexts_items_features,
+ contexts_items_availabilities=contexts_items_availabilities,
+ choices=choices,
+ )
+ stacked_probabilities.append(probabilities)
+
+ return tf.concat(stacked_probabilities, axis=0)
diff --git a/choice_learn/models/baseline_models.py b/choice_learn/models/baseline_models.py
new file mode 100644
index 00000000..4a93ebbf
--- /dev/null
+++ b/choice_learn/models/baseline_models.py
@@ -0,0 +1,124 @@
+"""Models to be used as baselines for choice modeling. Nothing smart here."""
+import numpy as np
+import tensorflow as tf
+
+from .base_model import ChoiceModel
+
+
+class RandomChoiceModel(ChoiceModel):
+ """Dumb model that randomly attributes utilities to products."""
+
+ def __init__(self, **kwargs):
+ """Initialization of the model."""
+ super().__init__(**kwargs)
+
+ def compute_batch_utility(
+ self,
+ fixed_items_features,
+ contexts_features,
+ contexts_items_features,
+ contexts_items_availabilities,
+ choices,
+ ):
+ """Computes the random utility for each product of each context.
+
+ Parameters
+ ----------
+ fixed_items_features : tuple of np.ndarray
+ Fixed-Item-Features: formatting from ChoiceDataset: a matrix representing the products
+ constant/fixed features.
+ Shape must be (n_items, n_items_features)
+ contexts_features : tuple of np.ndarray (contexts_features)
+ a batch of contexts features
+ Shape must be (n_contexts, n_contexts_features)
+ contexts_items_features : tuple of np.ndarray (contexts_items_features)
+ a batch of contexts items features
+ Shape must be (n_contexts, n_contexts_items_features)
+ contexts_items_availabilities : np.ndarray
+ A batch of contexts items availabilities
+ Shape must be (n_contexts, n_items)
+ choices : np.ndarray
+ Choices
+ Shape must be (n_contexts, )
+
+ Returns:
+ --------
+ tf.Tensor
+ (n_contexts, n_items) matrix of random utilities
+ """
+ # In order to avoid unused arguments warnings
+ _ = fixed_items_features, contexts_features, contexts_items_availabilities, choices
+ return np.squeeze(
+ np.random.uniform(shape=(contexts_items_features.shape), minval=0, maxval=1)
+ )
+
+ def fit(**kwargs):
+ """Make sure that nothing happens during .fit."""
+ _ = kwargs
+ return {}
+
+
+class DistribMimickingModel(ChoiceModel):
+ """Dumb class model that mimics the choice probabilities.
+
+ It stores the choice frequencies encountered in the train dataset and always returns them
+ """
+
+ def __init__(self, **kwargs):
+ """Initialization of the model."""
+ super().__init__(**kwargs)
+ self.weights = []
+
+ def fit(self, choice_dataset, **kwargs):
+ """Computes the choice frequency of each product and defines it as choice probabilities."""
+ _ = kwargs
+ choices = choice_dataset.choices
+ for i in range(choice_dataset.get_num_items()):
+ self.weights.append(tf.reduce_sum(tf.cast(choices == i, tf.float32)))
+ self.weights = tf.stack(self.weights) / len(choices)
+
+ def compute_batch_utility(
+ self,
+ fixed_items_features,
+ contexts_features,
+ contexts_items_features,
+ contexts_items_availabilities,
+ choices,
+ ):
+ """Returns utility that is fixed. U = log(P).
+
+ Parameters
+ ----------
+ fixed_items_features : tuple of np.ndarray
+ Fixed-Item-Features: formatting from ChoiceDataset: a matrix representing the products
+ constant/fixed features.
+ Shape must be (n_items, n_items_features)
+ contexts_features : tuple of np.ndarray (contexts_features)
+ a batch of contexts features
+ Shape must be (n_contexts, n_contexts_features)
+ contexts_items_features : tuple of np.ndarray (contexts_items_features)
+ a batch of contexts items features
+ Shape must be (n_contexts, n_contexts_items_features)
+ contexts_items_availabilities : np.ndarray
+ A batch of contexts items availabilities
+ Shape must be (n_contexts, n_items)
+ choices : np.ndarray
+ Choices
+ Shape must be (n_contexts, )
+
+ Returns:
+ --------
+ np.ndarray (n_contexts, n_items)
+ Utilities
+
+ Raises:
+ -------
+ ValueError
+ If the model has not been fitted cannot evaluate the utility
+ """
+ # In order to avoid unused arguments warnings
+ _ = fixed_items_features, contexts_features, contexts_items_availabilities
+ _ = contexts_items_features
+ if self.weights is None:
+ raise ValueError("Model not fitted")
+ return np.stack([np.log(self.weights.numpy())] * len(choices), axis=0)
diff --git a/choice_learn/models/conditional_mnl.py b/choice_learn/models/conditional_mnl.py
index 5a95b60c..27f8f54d 100644
--- a/choice_learn/models/conditional_mnl.py
+++ b/choice_learn/models/conditional_mnl.py
@@ -324,6 +324,7 @@ def instantiate_from_specifications(self):
## Fill items_indexes here
# Better organize feat_to_weight and specifications
+ self.weights = weights
return weights
def _store_dataset_features_names(self, dataset):
@@ -629,16 +630,18 @@ def instantiate(
"""
# Possibility to stack weights to be faster ????
if items_features_names is None:
- items_features_names = []
+ items_features_names = [()]
if contexts_features_names is None:
- contexts_features_names = []
+ contexts_features_names = [()]
if contexts_items_features_names is None:
- contexts_items_features_names = []
+ contexts_items_features_names = [()]
weights = []
weights_count = 0
self._items_features_names = []
for feat_tuple in items_features_names:
tuple_names = []
+ if feat_tuple is None:
+ feat_tuple = ()
for feat in feat_tuple:
if feat in self.params.keys():
if self.params[feat] == "constant":
@@ -671,6 +674,8 @@ def instantiate(
self._contexts_features_names = []
for feat_tuple in contexts_features_names:
+ if feat_tuple is None:
+ feat_tuple = ()
tuple_names = []
for feat in feat_tuple:
if feat in self.params.keys():
@@ -706,6 +711,8 @@ def instantiate(
self._contexts_items_features_names = []
for feat_tuple in contexts_items_features_names:
+ if feat_tuple is None:
+ feat_tuple = ()
tuple_names = []
for feat in feat_tuple:
if feat in self.params.keys():
@@ -783,6 +790,7 @@ def instantiate(
self.instantiated = True
else:
raise ValueError("No weights instantiated")
+ self.weights = weights
return weights
def compute_batch_utility(
@@ -820,6 +828,7 @@ def compute_batch_utility(
Computed utilities of shape (n_choices, n_items).
"""
if isinstance(self.params, ModelSpecification):
+ print("Model in instantiated using manual specification")
return self.compute_batch_utility_from_specification(
fixed_items_features=fixed_items_features,
contexts_features=contexts_features,
@@ -1001,7 +1010,14 @@ def fit(self, choice_dataset, get_report=False, **kwargs):
self.report = self.compute_report(choice_dataset)
return fit
- def _fit_with_lbfgs(self, choice_dataset, epochs=None, tolerance=1e-8, get_report=False):
+ def _fit_with_lbfgs(
+ self,
+ choice_dataset,
+ epochs=None,
+ sample_weight=None,
+ get_report=False,
+ **kwargs,
+ ):
"""Specific fit function to estimate the parameters with LBFGS.
Parameters
@@ -1034,7 +1050,12 @@ def _fit_with_lbfgs(self, choice_dataset, epochs=None, tolerance=1e-8, get_repor
self.instantiated = True
if epochs is None:
epochs = self.epochs
- fit = super()._fit_with_lbfgs(choice_dataset, epochs, tolerance)
+ fit = super()._fit_with_lbfgs(
+ dataset=choice_dataset,
+ epochs=epochs,
+ sample_weight=sample_weight,
+ **kwargs,
+ )
if get_report:
self.report = self.compute_report(choice_dataset)
return fit
@@ -1113,7 +1134,7 @@ def get_weights_std(self, dataset):
probabilities = tf.nn.softmax(utilities, axis=-1)
loss = tf.keras.losses.CategoricalCrossentropy(reduction="sum")(
y_pred=probabilities,
- y_true=tf.one_hot(dataset.choices, depth=4),
+ y_true=tf.one_hot(dataset.choices, depth=probabilities.shape[1]),
)
# Compute the Jacobian
jacobian = tape_2.jacobian(loss, w)
diff --git a/choice_learn/models/latent_class_mnl.py b/choice_learn/models/latent_class_mnl.py
new file mode 100644
index 00000000..94561ff6
--- /dev/null
+++ b/choice_learn/models/latent_class_mnl.py
@@ -0,0 +1,349 @@
+"""Latent Class MNL models."""
+import copy
+
+import tensorflow as tf
+
+from .base_model import BaseLatentClassModel
+from .conditional_mnl import ConditionalMNL, ModelSpecification
+from .simple_mnl import SimpleMNL
+
+
+class LatentClassSimpleMNL(BaseLatentClassModel):
+ """Latent Class for SimpleMNL."""
+
+ def __init__(
+ self,
+ n_latent_classes,
+ fit_method,
+ epochs,
+ add_exit_choice=False,
+ tolerance=1e-6,
+ intercept=None,
+ optimizer="Adam",
+ lr=0.001,
+ **kwargs,
+ ):
+ """Initialization.
+
+ Parameters
+ ----------
+ n_latent_classes : int
+ Number of latent classes.
+ fit_method : str
+ Method to be used to estimate the model.
+ epochs : int
+ Number of epochs
+ add_exit_choice : bool, optional
+ Whether to normalize probabilities with exit choice, by default False
+ tolerance : float, optional
+ LBFG-S tolerance, by default 1e-6
+ intercept : str, optional
+ Type of intercept to include in the SimpleMNL.
+ Must be in (None, 'item', 'item-full', 'constant'), by default None
+ optimizer : str, optional
+ tf.keras.optimizers to be used, by default "Adam"
+ lr : float, optional
+ Learning rate to use for optimizer if relevant, by default 0.001
+ """
+ self.n_latent_classes = n_latent_classes
+ self.intercept = intercept
+ model_params = {
+ "add_exit_choice": add_exit_choice,
+ "intercept": intercept,
+ "optimizer": optimizer,
+ "tolerance": tolerance,
+ "lr": lr,
+ "epochs": epochs,
+ }
+
+ super().__init__(
+ model_class=SimpleMNL,
+ model_parameters=model_params,
+ n_latent_classes=n_latent_classes,
+ fit_method=fit_method,
+ epochs=epochs,
+ add_exit_choice=add_exit_choice,
+ tolerance=tolerance,
+ optimizer=optimizer,
+ lr=lr,
+ **kwargs,
+ )
+
+ def instantiate_latent_models(
+ self, n_items, n_fixed_items_features, n_contexts_features, n_contexts_items_features
+ ):
+ """Instantiation of the Latent Models that are SimpleMNLs.
+
+ Parameters
+ ----------
+ n_items : int
+ Number of items/alternatives to consider.
+ n_fixed_items_features : int
+ Number of fixed items features.
+ n_contexts_features : int
+ Number of contexts features
+ n_contexts_items_features : int
+ Number of contexts items features
+ """
+ for model in self.models:
+ model.indexes, model.weights = model.instantiate(
+ n_items, n_fixed_items_features, n_contexts_features, n_contexts_items_features
+ )
+ model.instantiated = True
+
+ def instantiate(
+ self, n_items, n_fixed_items_features, n_contexts_features, n_contexts_items_features
+ ):
+ """Instantiation of the Latent Class MNL model."""
+ self.latent_logits = tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(self.n_latent_classes - 1,)),
+ name="Latent-Logits",
+ )
+
+ self.models = [self.model_class(**mp) for mp in self.model_parameters]
+
+ self.instantiate_latent_models(
+ n_items=n_items,
+ n_fixed_items_features=n_fixed_items_features,
+ n_contexts_features=n_contexts_features,
+ n_contexts_items_features=n_contexts_items_features,
+ )
+
+ def fit(self, dataset, **kwargs):
+ """Fit the model to the dataset.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ Dataset to fit the model to.
+ """
+ if not self.instantiated:
+ self.instantiate(
+ n_items=dataset.get_n_items(),
+ n_fixed_items_features=dataset.get_n_fixed_items_features(),
+ n_contexts_features=dataset.get_n_contexts_features(),
+ n_contexts_items_features=dataset.get_n_contexts_items_features(),
+ )
+ return super().fit(dataset, **kwargs)
+
+
+class LatentClassConditionalMNL(BaseLatentClassModel):
+ """Latent Class for ConditionalMNL."""
+
+ def __init__(
+ self,
+ n_latent_classes,
+ fit_method,
+ parameters=None,
+ epochs=1,
+ add_exit_choice=False,
+ tolerance=1e-6,
+ optimizer="Adam",
+ lr=0.001,
+ **kwargs,
+ ):
+ """Initialization.
+
+ Parameters
+ ----------
+ n_latent_classes : int
+ Number of latent classes.
+ fit_method : str
+ Method to be used to estimate the model.
+ parameters : dict or ModelSpecification
+ Dictionary containing the parametrization of the model.
+ The dictionary must have the following structure:
+ {feature_name_1: mode_1, feature_name_2: mode_2, ...}
+ mode must be among "constant", "item", "item-full" for now
+ (same specifications as torch-choice).
+ epochs : int
+ Number of epochs
+ add_exit_choice : bool, optional
+ Whether to normalize probabilities with exit choice, by default False
+ tolerance : float, optional
+ LBFG-S tolerance, by default 1e-6
+ optimizer : str, optional
+ tf.keras.optimizers to be used, by default "Adam"
+ lr : float, optional
+ Learning rate to use for optimizer if relevant, by default 0.001
+ """
+ self.n_latent_classes = n_latent_classes
+ self.fit_method = fit_method
+ self.params = parameters
+ self.epochs = epochs
+ self.add_exit_choice = add_exit_choice
+ self.tolerance = tolerance
+ self.optimizer = optimizer
+ self.lr = lr
+
+ model_params = {
+ "parameters": self.params,
+ "add_exit_choice": self.add_exit_choice,
+ "optimizer": self.optimizer,
+ "tolerance": self.tolerance,
+ "lr": self.lr,
+ "epochs": self.epochs,
+ }
+
+ super().__init__(
+ model_class=ConditionalMNL,
+ model_parameters=model_params,
+ n_latent_classes=n_latent_classes,
+ fit_method=fit_method,
+ epochs=epochs,
+ add_exit_choice=add_exit_choice,
+ tolerance=tolerance,
+ optimizer=optimizer,
+ lr=lr,
+ **kwargs,
+ )
+
+ def instantiate_latent_models(
+ self,
+ n_items,
+ items_features_names,
+ contexts_features_names,
+ contexts_items_features_names,
+ ):
+ """Instantiation of the Latent Models that are ConditionalMNLs.
+
+ Parameters
+ ----------
+ n_items : int
+ Number of items/alternatives to consider.
+ items_features_names: str,
+ Names of fixed_items_features
+ contexts_features_names: str,
+ Names of contexts features
+ contexts_items_features_names: str,
+ Names of contexts items features
+ """
+ if isinstance(self.params, ModelSpecification):
+ for model in self.models:
+ model.params = copy.deepcopy(self.params)
+ model.weights = model.instantiate_from_specifications()
+
+ model._items_features_names = items_features_names
+ model._contexts_features_names = contexts_features_names
+ model._contexts_items_features_names = contexts_items_features_names
+ else:
+ for model in self.models:
+ model.params = self.params
+ model.indexes, model.weights = model.instantiate(
+ num_items=n_items,
+ items_features_names=items_features_names,
+ contexts_features_names=contexts_features_names,
+ contexts_items_features_names=contexts_items_features_names,
+ )
+ model.instantiated = True
+
+ def instantiate(
+ self,
+ n_items,
+ items_features_names,
+ contexts_features_names,
+ contexts_items_features_names,
+ ):
+ """Instantiation of the Latent Class MNL model."""
+ self.latent_logits = tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(self.n_latent_classes - 1,)),
+ name="Latent-Logits",
+ )
+
+ self.models = [self.model_class(**mp) for mp in self.model_parameters]
+
+ self.instantiate_latent_models(
+ n_items=n_items,
+ items_features_names=items_features_names,
+ contexts_features_names=contexts_features_names,
+ contexts_items_features_names=contexts_items_features_names,
+ )
+
+ def add_coefficients(
+ self, coefficient_name, feature_name, items_indexes=None, items_names=None
+ ):
+ """Adds a coefficient to the model through the specification of the utility.
+
+ Parameters
+ ----------
+ coefficient_name : str
+ Name given to the coefficient.
+ feature_name : str
+ features name to which the coefficient is associated. It should work with
+ the names given.
+ in the ChoiceDataset that will be used for parameters estimation.
+ items_indexes : list of int, optional
+ list of items indexes (in the ChoiceDataset) for which we need to add a coefficient,
+ by default None
+ items_names : list of str, optional
+ list of items names (in the ChoiceDataset) for which we need to add a coefficient,
+ by default None
+
+ Raises:
+ -------
+ ValueError
+ When names or indexes are both not specified.
+ """
+ if self.params is None:
+ self.params = ModelSpecification()
+ elif not isinstance(self.params, ModelSpecification):
+ raise ValueError("Cannot add coefficient on top of a dict instantiation.")
+ self.params.add_coefficients(
+ coefficient_name=coefficient_name,
+ feature_name=feature_name,
+ items_indexes=items_indexes,
+ items_names=items_names,
+ )
+
+ def add_shared_coefficient(
+ self, coefficient_name, feature_name, items_indexes=None, items_names=None
+ ):
+ """Adds a single, shared coefficient to the model through the specification of the utility.
+
+ Parameters
+ ----------
+ coefficient_name : str
+ Name given to the coefficient.
+ feature_name : str
+ features name to which the coefficient is associated. It should work with
+ the names given.
+ in the ChoiceDataset that will be used for parameters estimation.
+ items_indexes : list of int, optional
+ list of items indexes (in the ChoiceDataset) for which the coefficient will be used,
+ by default None
+ items_names : list of str, optional
+ list of items names (in the ChoiceDataset) for which the coefficient will be used,
+ by default None
+
+ Raises:
+ -------
+ ValueError
+ When names or indexes are both not specified.
+ """
+ if self.params is None:
+ self.params = ModelSpecification()
+ elif not isinstance(self.params, ModelSpecification):
+ raise ValueError("Cannot add shared coefficient on top of a dict instantiation.")
+ self.params.add_shared_coefficient(
+ coefficient_name=coefficient_name,
+ feature_name=feature_name,
+ items_indexes=items_indexes,
+ items_names=items_names,
+ )
+
+ def fit(self, dataset, **kwargs):
+ """Fit the model to the dataset.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ Dataset to fit the model to.
+ """
+ if not self.instantiated:
+ self.instantiate(
+ n_items=dataset.get_n_items(),
+ items_features_names=dataset.fixed_items_features_names,
+ contexts_features_names=dataset.contexts_features_names,
+ contexts_items_features_names=dataset.contexts_items_features_names,
+ )
+ return super().fit(dataset, **kwargs)
diff --git a/choice_learn/models/rumnet.py b/choice_learn/models/rumnet.py
index 09f08fa9..2872bce7 100644
--- a/choice_learn/models/rumnet.py
+++ b/choice_learn/models/rumnet.py
@@ -1133,9 +1133,17 @@ def batch_predict(
probabilities = tf.divide(
probabilities, tf.reduce_sum(probabilities, axis=1, keepdims=True) + 1e-5
)
- batch_loss = self.loss(
- y_pred=probabilities,
- y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
- sample_weight=sample_weight,
- )
+
+ batch_loss = {
+ "optimized_loss": self.loss(
+ y_pred=probabilities,
+ y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
+ sample_weight=sample_weight,
+ ),
+ "NegativeLogLikelihood": tf.keras.losses.CategoricalCrossentropy()(
+ y_pred=probabilities,
+ y_true=tf.one_hot(choices, depth=probabilities.shape[1]),
+ sample_weight=sample_weight,
+ ),
+ }
return batch_loss, probabilities
diff --git a/choice_learn/models/simple_mnl.py b/choice_learn/models/simple_mnl.py
new file mode 100644
index 00000000..b23bf9f2
--- /dev/null
+++ b/choice_learn/models/simple_mnl.py
@@ -0,0 +1,368 @@
+"""Implementation of the simple linear multinomial logit model.
+
+It is a multi output logistic regression.
+"""
+
+import pandas as pd
+import tensorflow as tf
+
+from .base_model import ChoiceModel
+
+
+class SimpleMNL(ChoiceModel):
+ """Simple MNL with one linear coefficient to estimate by feature."""
+
+ def __init__(
+ self,
+ add_exit_choice=False,
+ intercept=None,
+ optimizer="Adam",
+ lr=0.001,
+ **kwargs,
+ ):
+ """Initialization of Simple-MNL.
+
+ Parameters:
+ -----------
+ add_exit_choice : bool, optional
+ Whether or not to normalize the probabilities computation with an exit choice
+            whose utility would be 1, by default False
+ optimizer: str
+ TensorFlow optimizer to be used for estimation
+ lr: float
+ Learning Rate to be used with optimizer.
+ """
+ super().__init__(normalize_non_buy=add_exit_choice, optimizer=optimizer, lr=lr, **kwargs)
+ self.instantiated = False
+ self.intercept = intercept
+
+ def instantiate(
+ self, n_items, n_fixed_items_features, n_contexts_features, n_contexts_items_features
+ ):
+ """Instantiate the model from ModelSpecification object.
+
+        Builds one trainable weight vector per non-empty feature group,
+        plus an optional intercept depending on self.intercept.
+
+        Parameters
+        ----------
+ n_items : int
+            Number of items/alternatives to consider.
+ n_fixed_items_features : int
+ Number of fixed items features.
+ n_contexts_features : int
+ Number of contexts features
+ n_contexts_items_features : int
+ Number of contexts items features
+
+ Returns:
+ --------
+ list of tf.Tensor
+            List of the weights created corresponding to the specification.
+ """
+ weights = []
+ indexes = {}
+ for n_feat, feat_name in zip(
+ [n_fixed_items_features, n_contexts_features, n_contexts_items_features],
+ ["items", "contexts", "contexts_items"],
+ ):
+ if n_feat > 0:
+ weights = [
+ tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(n_feat,)),
+ name=f"Weights_{feat_name}",
+ )
+ ]
+ indexes[feat_name] = len(weights) - 1
+ if self.intercept is None:
+ print("No intercept in the model")
+ elif self.intercept == "item":
+ weights.append(
+ tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(n_items - 1,)),
+ name="Intercept",
+ )
+ )
+ indexes["intercept"] = len(weights) - 1
+ elif self.intercept == "item-full":
+ print("Are you sure you do not want to normalize an intercept to 0?")
+ weights.append(
+ tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(n_items,)),
+ name="Intercept",
+ )
+ )
+ indexes["intercept"] = len(weights) - 1
+ else:
+ weights.append(
+ tf.Variable(
+ tf.random_normal_initializer(0.0, 0.02, seed=42)(shape=(1,)),
+ name="Intercept",
+ )
+ )
+ indexes["intercept"] = len(weights) - 1
+
+ self.instantiated = True
+ self.indexes = indexes
+ self.weights = weights
+ return indexes, weights
+
+ def compute_batch_utility(
+ self,
+ fixed_items_features,
+ contexts_features,
+ contexts_items_features,
+ contexts_items_availabilities,
+ choices,
+ ):
+ """Main method to compute the utility of the model. Selects the right method to compute.
+
+ Parameters
+ ----------
+ fixed_items_features : tuple of np.ndarray
+ Fixed-Item-Features: formatting from ChoiceDataset: a matrix representing the products
+ constant/fixed features.
+ Shape must be (n_items, n_items_features)
+ contexts_features : tuple of np.ndarray (contexts_features)
+ a batch of contexts features
+ Shape must be (n_contexts, n_contexts_features)
+ contexts_items_features : tuple of np.ndarray (contexts_items_features)
+ a batch of contexts items features
+ Shape must be (n_contexts, n_contexts_items_features)
+ contexts_items_availabilities : np.ndarray
+ A batch of contexts items availabilities
+ Shape must be (n_contexts, n_items)
+        choices : np.ndarray
+ Choices
+ Shape must be (n_contexts, )
+
+ Returns:
+ --------
+ tf.Tensor
+ Computed utilities of shape (n_choices, n_items).
+ """
+ _, _ = contexts_items_availabilities, choices
+ if "items" in self.indexes.keys():
+ if isinstance(fixed_items_features, tuple):
+ fixed_items_features = tf.concat(*fixed_items_features, axis=1)
+ fixed_items_utilities = tf.tensordot(
+ fixed_items_features, self.weights[self.indexes["items"]], axes=1
+ )
+ else:
+ fixed_items_utilities = 0
+
+ if "contexts" in self.indexes.keys():
+ if isinstance(contexts_features, tuple):
+ contexts_features = tf.concat(*contexts_features, axis=1)
+ contexts_utilities = tf.tensordot(
+ contexts_features, self.weights[self.indexes["contexts"]], axes=1
+ )
+ contexts_utilities = tf.expand_dims(contexts_utilities, axis=0)
+ else:
+ contexts_utilities = 0
+
+ if "contexts_items" in self.indexes.keys():
+ if isinstance(contexts_items_features, tuple):
+ contexts_items_features = tf.concat([*contexts_items_features], axis=2)
+ contexts_items_utilities = tf.tensordot(
+ contexts_items_features, self.weights[self.indexes["contexts_items"]], axes=1
+ )
+ else:
+ contexts_utilities = tf.zeros(
+ (contexts_utilities.shape[0], fixed_items_utilities.shape[1], 1)
+ )
+
+ if "intercept" in self.indexes.keys():
+ intercept = self.weights[self.indexes["intercept"]]
+ if self.intercept == "item":
+ intercept = tf.concat([tf.constant([0.0]), intercept], axis=0)
+ if self.intercept in ["item", "item-full"]:
+ intercept = tf.expand_dims(intercept, axis=0)
+ else:
+ intercept = 0
+
+ return fixed_items_utilities + contexts_utilities + contexts_items_utilities + intercept
+
+ def fit(self, choice_dataset, get_report=False, **kwargs):
+ """Main fit function to estimate the paramters.
+
+ Parameters
+ ----------
+ choice_dataset : ChoiceDataset
+ Choice dataset to use for the estimation.
+ get_report: bool, optional
+ Whether or not to compute a report of the estimation, by default False
+
+ Returns:
+ --------
+        SimpleMNL
+ With estimated weights.
+ """
+ if not self.instantiated:
+ # Lazy Instantiation
+ print("Instantiation")
+ self.indexes, self.weights = self.instantiate(
+ n_items=choice_dataset.get_n_items(),
+ n_fixed_items_features=choice_dataset.get_n_fixed_items_features(),
+ n_contexts_features=choice_dataset.get_n_contexts_features(),
+ n_contexts_items_features=choice_dataset.get_n_contexts_items_features(),
+ )
+ self.instantiated = True
+ fit = super().fit(choice_dataset=choice_dataset, **kwargs)
+ if get_report:
+ self.report = self.compute_report(choice_dataset)
+ return fit
+
+ def _fit_with_lbfgs(
+ self, choice_dataset, epochs=None, sample_weight=None, get_report=False, **kwargs
+ ):
+ """Specific fit function to estimate the paramters with LBFGS.
+
+ Parameters
+ ----------
+ choice_dataset : ChoiceDataset
+ Choice dataset to use for the estimation.
+        epochs : int
+ Number of epochs to run.
+ sample_weight: Iterable, optional
+ list of each sample weight, by default None meaning that all samples have weight 1.
+ get_report: bool, optional
+ Whether or not to compute a report of the estimation, by default False.
+
+ Returns:
+ --------
+        SimpleMNL
+ self with estimated weights.
+ """
+ if not self.instantiated:
+ # Lazy Instantiation
+ print("Instantiation")
+ self.indexes, self.weights = self.instantiate(
+ n_items=choice_dataset.get_n_items(),
+ n_fixed_items_features=choice_dataset.get_n_fixed_items_features(),
+ n_contexts_features=choice_dataset.get_n_contexts_features(),
+ n_contexts_items_features=choice_dataset.get_n_contexts_items_features(),
+ )
+ self.instantiated = True
+ if epochs is None:
+ epochs = self.epochs
+ fit = super()._fit_with_lbfgs(
+ dataset=choice_dataset, epochs=epochs, sample_weight=sample_weight, **kwargs
+ )
+ if get_report:
+ self.report = self.compute_report(choice_dataset)
+ return fit
+
+ def compute_report(self, dataset):
+ """Computes a report of the estimated weights.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ ChoiceDataset used for the estimation of the weights that will be
+ used to compute the Std Err of this estimation.
+
+ Returns:
+ --------
+ pandas.DataFrame
+ A DF with estimation, Std Err, z_value and p_value for each coefficient.
+ """
+ import tensorflow_probability as tfp
+
+ weights_std = self.get_weights_std(dataset)
+ dist = tfp.distributions.Normal(loc=0.0, scale=1.0)
+
+ names = []
+ z_values = []
+ estimations = []
+ p_z = []
+ i = 0
+ for weight in self.weights:
+ for j in range(weight.shape[0]):
+ names.append(f"{weight.name}_{j}")
+ estimations.append(weight.numpy()[j])
+ z_values.append(weight.numpy()[j] / weights_std[i].numpy())
+ p_z.append(2 * (1 - dist.cdf(tf.math.abs(z_values[-1])).numpy()))
+ i += 1
+
+ return pd.DataFrame(
+ {
+ "Coefficient Name": names,
+ "Coefficient Estimation": estimations,
+ "Std. Err": weights_std.numpy(),
+ "z_value": z_values,
+ "P(.>z)": p_z,
+ },
+ )
+
+ def get_weights_std(self, dataset):
+ """Approximates Std Err with Hessian matrix.
+
+ Parameters
+ ----------
+ dataset : ChoiceDataset
+ ChoiceDataset used for the estimation of the weights that will be
+ used to compute the Std Err of this estimation.
+
+ Returns:
+ --------
+ tf.Tensor
+ Estimation of the Std Err for the weights.
+ """
+ # Loops of differentiation
+ with tf.GradientTape() as tape_1:
+ with tf.GradientTape(persistent=True) as tape_2:
+ model = self.clone()
+ w = tf.concat(self.weights, axis=0)
+ tape_2.watch(w)
+ tape_1.watch(w)
+ mw = []
+ index = 0
+ for _w in self.weights:
+ mw.append(w[index : index + _w.shape[0]])
+ index += _w.shape[0]
+ model.weights = mw
+ for batch in dataset.iter_batch(batch_size=-1):
+ utilities = model.compute_batch_utility(*batch)
+ probabilities = tf.nn.softmax(utilities, axis=-1)
+ loss = tf.keras.losses.CategoricalCrossentropy(reduction="sum")(
+ y_pred=probabilities,
+ y_true=tf.one_hot(dataset.choices, depth=probabilities.shape[-1]),
+ )
+ # Compute the Jacobian
+ jacobian = tape_2.jacobian(loss, w)
+ # Compute the Hessian from the Jacobian
+ hessian = tape_1.jacobian(jacobian, w)
+ hessian = tf.linalg.inv(tf.squeeze(hessian))
+ return tf.sqrt([hessian[i][i] for i in range(len(tf.squeeze(hessian)))])
+
+ def clone(self):
+ """Returns a clone of the model."""
+ clone = SimpleMNL(
+ add_exit_choice=self.normalize_non_buy,
+ optimizer=self.optimizer_name,
+ )
+ if hasattr(self, "history"):
+ clone.history = self.history
+ if hasattr(self, "is_fitted"):
+ clone.is_fitted = self.is_fitted
+ if hasattr(self, "instantiated"):
+ clone.instantiated = self.instantiated
+ clone.loss = self.loss
+ clone.label_smoothing = self.label_smoothing
+ if hasattr(self, "report"):
+ clone.report = self.report
+ if hasattr(self, "weights"):
+ clone.weights = self.weights
+ if hasattr(self, "indexes"):
+ clone.indexes = self.indexes
+ if hasattr(self, "intercept"):
+ clone.intercept = self.intercept
+ if hasattr(self, "lr"):
+ clone.lr = self.lr
+ if hasattr(self, "_items_features_names"):
+ clone._items_features_names = self._items_features_names
+ if hasattr(self, "_contexts_features_names"):
+ clone._contexts_features_names = self._contexts_features_names
+ if hasattr(self, "_contexts_items_features_names"):
+ clone._contexts_items_features_names = self._contexts_items_features_names
+ return clone
diff --git a/notebooks/choice_learn_introduction_clogit.ipynb b/notebooks/choice_learn_introduction_clogit.ipynb
index bd20a3e0..406de3e3 100644
--- a/notebooks/choice_learn_introduction_clogit.ipynb
+++ b/notebooks/choice_learn_introduction_clogit.ipynb
@@ -211,8 +211,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "The average neg-loglikelihood is: 0.67447394\n",
- "The total neg-loglikelihood is: 1874.3630829453468\n"
+ "The average neg-loglikelihood is: 0.6744666\n",
+ "The total neg-loglikelihood is: 1874.3427090644836\n"
]
}
],
@@ -267,25 +267,25 @@
"
\n",
" 0 | \n",
" beta_inter:0_0 | \n",
- " 0.698367 | \n",
- " 1.280208 | \n",
- " 0.545511 | \n",
- " 5.854024e-01 | \n",
+ " 0.698380 | \n",
+ " 1.280237 | \n",
+ " 0.545508 | \n",
+ " 5.854039e-01 | \n",
"
\n",
" \n",
" 1 | \n",
" beta_inter:0_1 | \n",
- " 1.844104 | \n",
- " 0.708454 | \n",
- " 2.602998 | \n",
- " 9.241223e-03 | \n",
+ " 1.844129 | \n",
+ " 0.708489 | \n",
+ " 2.602904 | \n",
+ " 9.243727e-03 | \n",
"
\n",
" \n",
" 2 | \n",
" beta_inter:0_2 | \n",
- " 3.274187 | \n",
- " 0.624366 | \n",
- " 5.244018 | \n",
+ " 3.274206 | \n",
+ " 0.624402 | \n",
+ " 5.243744 | \n",
" 1.192093e-07 | \n",
"
\n",
" \n",
@@ -293,7 +293,7 @@
" beta_income:0_0 | \n",
" -0.089087 | \n",
" 0.018347 | \n",
- " -4.855643 | \n",
+ " -4.855632 | \n",
" 1.192093e-06 | \n",
"
\n",
" \n",
@@ -301,7 +301,7 @@
" beta_income:0_1 | \n",
" -0.027993 | \n",
" 0.003873 | \n",
- " -7.228673 | \n",
+ " -7.228651 | \n",
" 0.000000e+00 | \n",
"
\n",
" \n",
@@ -309,15 +309,15 @@
" beta_income:0_2 | \n",
" -0.038147 | \n",
" 0.004083 | \n",
- " -9.342690 | \n",
+ " -9.342653 | \n",
" 0.000000e+00 | \n",
"
\n",
" \n",
" 6 | \n",
" beta_ivt:0_0 | \n",
- " 0.059509 | \n",
+ " 0.059510 | \n",
" 0.010073 | \n",
- " 5.907992 | \n",
+ " 5.908023 | \n",
" 0.000000e+00 | \n",
"
\n",
" \n",
@@ -325,39 +325,39 @@
" beta_ivt:0_1 | \n",
" -0.006784 | \n",
" 0.004433 | \n",
- " -1.530137 | \n",
- " 1.259828e-01 | \n",
+ " -1.530130 | \n",
+ " 1.259845e-01 | \n",
"
\n",
" \n",
" 8 | \n",
" beta_ivt:0_2 | \n",
" -0.006460 | \n",
" 0.001898 | \n",
- " -3.403037 | \n",
- " 6.663799e-04 | \n",
+ " -3.402944 | \n",
+ " 6.666183e-04 | \n",
"
\n",
" \n",
" 9 | \n",
" beta_ivt:0_3 | \n",
" -0.001450 | \n",
" 0.001187 | \n",
- " -1.221401 | \n",
- " 2.219341e-01 | \n",
+ " -1.221381 | \n",
+ " 2.219417e-01 | \n",
"
\n",
" \n",
" 10 | \n",
" beta_cost:0_0 | \n",
" -0.033339 | \n",
" 0.007095 | \n",
- " -4.698925 | \n",
+ " -4.698679 | \n",
" 2.622604e-06 | \n",
"
\n",
" \n",
" 11 | \n",
" beta_freq:0_0 | \n",
- " 0.092529 | \n",
+ " 0.092530 | \n",
" 0.005098 | \n",
- " 18.151833 | \n",
+ " 18.151777 | \n",
" 0.000000e+00 | \n",
"
\n",
" \n",
@@ -365,7 +365,7 @@
" beta_ovt:0_0 | \n",
" -0.043004 | \n",
" 0.003225 | \n",
- " -13.335643 | \n",
+ " -13.335551 | \n",
" 0.000000e+00 | \n",
"
\n",
" \n",
@@ -374,19 +374,19 @@
],
"text/plain": [
" Coefficient Name Coefficient Estimation Std. Err z_value P(.>z)\n",
- "0 beta_inter:0_0 0.698367 1.280208 0.545511 5.854024e-01\n",
- "1 beta_inter:0_1 1.844104 0.708454 2.602998 9.241223e-03\n",
- "2 beta_inter:0_2 3.274187 0.624366 5.244018 1.192093e-07\n",
- "3 beta_income:0_0 -0.089087 0.018347 -4.855643 1.192093e-06\n",
- "4 beta_income:0_1 -0.027993 0.003873 -7.228673 0.000000e+00\n",
- "5 beta_income:0_2 -0.038147 0.004083 -9.342690 0.000000e+00\n",
- "6 beta_ivt:0_0 0.059509 0.010073 5.907992 0.000000e+00\n",
- "7 beta_ivt:0_1 -0.006784 0.004433 -1.530137 1.259828e-01\n",
- "8 beta_ivt:0_2 -0.006460 0.001898 -3.403037 6.663799e-04\n",
- "9 beta_ivt:0_3 -0.001450 0.001187 -1.221401 2.219341e-01\n",
- "10 beta_cost:0_0 -0.033339 0.007095 -4.698925 2.622604e-06\n",
- "11 beta_freq:0_0 0.092529 0.005098 18.151833 0.000000e+00\n",
- "12 beta_ovt:0_0 -0.043004 0.003225 -13.335643 0.000000e+00"
+ "0 beta_inter:0_0 0.698380 1.280237 0.545508 5.854039e-01\n",
+ "1 beta_inter:0_1 1.844129 0.708489 2.602904 9.243727e-03\n",
+ "2 beta_inter:0_2 3.274206 0.624402 5.243744 1.192093e-07\n",
+ "3 beta_income:0_0 -0.089087 0.018347 -4.855632 1.192093e-06\n",
+ "4 beta_income:0_1 -0.027993 0.003873 -7.228651 0.000000e+00\n",
+ "5 beta_income:0_2 -0.038147 0.004083 -9.342653 0.000000e+00\n",
+ "6 beta_ivt:0_0 0.059510 0.010073 5.908023 0.000000e+00\n",
+ "7 beta_ivt:0_1 -0.006784 0.004433 -1.530130 1.259845e-01\n",
+ "8 beta_ivt:0_2 -0.006460 0.001898 -3.402944 6.666183e-04\n",
+ "9 beta_ivt:0_3 -0.001450 0.001187 -1.221381 2.219417e-01\n",
+ "10 beta_cost:0_0 -0.033339 0.007095 -4.698679 2.622604e-06\n",
+ "11 beta_freq:0_0 0.092530 0.005098 18.151777 0.000000e+00\n",
+ "12 beta_ovt:0_0 -0.043004 0.003225 -13.335551 0.000000e+00"
]
},
"execution_count": null,
@@ -469,14 +469,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 1/1 [00:01<00:00, 2.00s/it]"
+ "100%|██████████| 1/1 [00:01<00:00, 1.73s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
- "'Ground Truth' Negative LogLikelihood: tf.Tensor(1874.3633, shape=(), dtype=float32)\n"
+ "'Ground Truth' Negative LogLikelihood: tf.Tensor(1874.3427, shape=(), dtype=float32)\n"
]
},
{
@@ -526,11 +526,11 @@
"output_type": "stream",
"text": [
"Purchase probability of each item for the first 5 sessions: tf.Tensor(\n",
- "[[0.1906135 0.00353266 0.4053667 0.4004831 ]\n",
- " [0.34869286 0.00069682 0.36830992 0.28229675]\n",
- " [0.14418365 0.00651285 0.40567666 0.44362238]\n",
- " [0.34869286 0.00069682 0.36830992 0.28229675]\n",
- " [0.34869286 0.00069682 0.36830992 0.28229675]], shape=(5, 4), dtype=float32)\n"
+ "[[0.19061361 0.00353295 0.4053689 0.4004805 ]\n",
+ " [0.3486952 0.00069691 0.36830923 0.28229502]\n",
+ " [0.14418328 0.00651326 0.40567988 0.44361907]\n",
+ " [0.3486952 0.00069691 0.36830923 0.28229502]\n",
+ " [0.3486952 0.00069691 0.36830923 0.28229502]], shape=(5, 4), dtype=float32)\n"
]
}
],
@@ -582,14 +582,14 @@
{
"data": {
"text/plain": [
- "[,\n",
- " ,\n",
- " ,\n",
- " ,\n",
+ "[,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
" ,\n",
- " ]"
+ " ]"
]
},
"execution_count": null,
@@ -611,7 +611,7 @@
{
"data": {
"text/plain": [
- ""
+ ""
]
},
"execution_count": null,
@@ -669,11 +669,11 @@
"text": [
"L-BFGS Opimization finished:\n",
"---------------------------------------------------------------\n",
- "Number of iterations: 170\n",
+ "Number of iterations: 190\n",
"Algorithm converged before reaching max iterations: True\n",
- "[, , , , , ]\n"
+ "[, , , , , ]\n"
]
}
],
@@ -834,16 +834,16 @@
{
"data": {
"text/plain": [
- "[,\n",
- " ,\n",
- " ,\n",
- " ,\n",
- " ,\n",
- " ,\n",
- " ,\n",
- " ,\n",
- " ,\n",
- " 1:0' shape=(1, 1) dtype=float32, numpy=array([[1.413982]], dtype=float32)>]"
+ "[,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
+ " ,\n",
+ " 1:0' shape=(1, 1) dtype=float32, numpy=array([[1.4228866]], dtype=float32)>]"
]
},
"execution_count": null,
@@ -865,7 +865,7 @@
{
"data": {
"text/plain": [
- ""
+ ""
]
},
"execution_count": null,
diff --git a/notebooks/choice_learn_introduction_data.ipynb b/notebooks/choice_learn_introduction_data.ipynb
index 05c0f692..cfb6ca8f 100644
--- a/notebooks/choice_learn_introduction_data.ipynb
+++ b/notebooks/choice_learn_introduction_data.ipynb
@@ -320,9 +320,11 @@
"source": [
"The ChoiceDataset is ready !\n",
"\n",
+ "If your DataFrame is in the wide format, you can use the equivalent method *from_single_wide_df*. An example can be found [here](https://github.com/artefactory/choice-learn-private/blob/main/notebooks/dataset_creation.ipynb) on the SwissMetro dataset: \n",
+ "\n",
"You now have three possibilities to continue discovering the choice-learn package:\n",
"- You can directly go [here]() to the modelling tutorial if you want to understand how a first simple ConditionMNl would be implementd.\n",
- "- You can go [here]() if your dataset is organized differently to see all the different ways to instantiate a ChoiceDataset. In particular it helps if you DataFrame is in the wide format or if it is splitted into several DataFrames.\n",
+ "- You can go [here]() if your dataset is organized differently to see all the different ways to instantiate a ChoiceDataset. In particular it helps if you data is splitted into several DataFrames or if you have another format of data.\n",
"- Or you can continue this current tutorial to better understand the ChoiceDataset machinery and everything there is to know about it.\n",
"\n",
"Whatever your choice, you can also check [here](#ready-to-use-datasets) the list of open source datasets available directly with the package."
diff --git a/notebooks/latent_class_model.ipynb b/notebooks/latent_class_model.ipynb
new file mode 100644
index 00000000..88cd8f10
--- /dev/null
+++ b/notebooks/latent_class_model.ipynb
@@ -0,0 +1,287 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Example of use of Latent Class MNL"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n",
+ "\n",
+ "import sys\n",
+ "from pathlib import Path\n",
+ "\n",
+ "sys.path.append(\"../\")\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "\n",
+ "import tensorflow as tf"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's use the Electricity Dataset used in this [tutorial](https://rpubs.com/msarrias1986/335556)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from choice_learn.datasets import load_electricity\n",
+ "\n",
+ "elec_dataset = load_electricity(as_frame=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from choice_learn.models.simple_mnl import SimpleMNL\n",
+ "from choice_learn.models.latent_class_mnl import LatentClassSimpleMNL"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lc_model = LatentClassSimpleMNL(n_latent_classes=3, fit_method=\"mle\", optimizer=\"lbfgs\", epochs=1000, tolerance=1e-10)\n",
+ "hist = lc_model.fit(elec_dataset, verbose=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"Latent Class Model weights:\")\n",
+ "print(\"Classes Logits:\", lc_model.latent_logits)\n",
+ "for i in range(3):\n",
+ " print(\"\\n\")\n",
+ " print(f\"Model Nb {i}, weights:\", lc_model.models[i].weights)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"Negative Log-Likelihood:\")\n",
+ "lc_model.evaluate(elec_dataset) * len(elec_dataset)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Latent Conditional MNL\n",
+ "We used a very simple MNL. Here we simulate the same MNL, by using the Conditional-MNL formulation.\\\n",
+ "Don't hesitate to read the conditional-MNL tutorial to better understand how to use this formulation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from choice_learn.models.latent_class_mnl import LatentClassConditionalMNL"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lc_model_2 = LatentClassConditionalMNL(n_latent_classes=3,\n",
+ " fit_method=\"mle\",\n",
+ " optimizer=\"lbfgs\",\n",
+ " epochs=1000,\n",
+ " tolerance=1e-12)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For each feature, let's add a coefficient that is shared by all items:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lc_model_2.add_shared_coefficient(coefficient_name=\"pf\",\n",
+ " feature_name=\"pf\",\n",
+ " items_indexes=[0, 1, 2, 3])\n",
+ "lc_model_2.add_shared_coefficient(coefficient_name=\"cl\",\n",
+ " feature_name=\"cl\",\n",
+ " items_indexes=[0, 1, 2, 3])\n",
+ "lc_model_2.add_shared_coefficient(coefficient_name=\"loc\",\n",
+ " feature_name=\"loc\",\n",
+ " items_indexes=[0, 1, 2, 3])\n",
+ "lc_model_2.add_shared_coefficient(coefficient_name=\"wk\",\n",
+ " feature_name=\"wk\",\n",
+ " items_indexes=[0, 1, 2, 3])\n",
+ "lc_model_2.add_shared_coefficient(coefficient_name=\"tod\",\n",
+ " feature_name=\"tod\",\n",
+ " items_indexes=[0, 1, 2, 3])\n",
+ "lc_model_2.add_shared_coefficient(coefficient_name=\"seas\",\n",
+ " feature_name=\"seas\",\n",
+ " items_indexes=[0, 1, 2, 3])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Fit\n",
+ "hist2 = lc_model_2.fit(elec_dataset, verbose=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"Negative Log-Likelihood:\", lc_model_2.evaluate(elec_dataset)*len(elec_dataset))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(\"Latent Class Model weights:\")\n",
+ "print(\"Classes Logits:\", lc_model_2.latent_logits)\n",
+ "for i in range(3):\n",
+ " print(\"\\n\")\n",
+ " print(f\"Model Nb {i}, weights:\", lc_model_2.models[i].weights)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Just like any ChoiceModel you can get the probabilities:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lc_model.predict_probas(elec_dataset[:4])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you want to use more complex formulations of Latent Class models, you can directly use the *BaseLatentClassModel* from *choice_learn.models.base_model*:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from choice_learn.models.base_model import BaseLatentClassModel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "manual_lc = BaseLatentClassModel(\n",
+ " model_class=SimpleMNL,\n",
+ " model_parameters={\"add_exit_choice\": False},\n",
+ " n_latent_classes=3,\n",
+ " fit_method=\"mle\",\n",
+ " epochs=1000,\n",
+ " optimizer=\"lbfgs\"\n",
+ " )\n",
+ "manual_lc.instantiate(n_items=4,\n",
+ " n_fixed_items_features=0,\n",
+ " n_contexts_features=0,\n",
+ " n_contexts_items_features=6)\n",
+ "manual_hist = manual_lc.fit(elec_dataset, verbose=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "manual_lc.evaluate(elec_dataset) * len(elec_dataset)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you need to go deeper, you can look in *choice_learn/models/latent_class_mnl* to see different implementations that could help you."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/logistic_regression.ipynb b/notebooks/logistic_regression.ipynb
new file mode 100644
index 00000000..45252ec3
--- /dev/null
+++ b/notebooks/logistic_regression.ipynb
@@ -0,0 +1,184 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Logistic Regression: 3-class Classifier\n",
+ "\n",
+ "The Conditional MNL is a generalization of the multi-class Logistic Regression.\n",
+ "Here, we recreate the scikit-learn tutorial that can be found [here](https://scikit-learn.org/stable/auto_examples/linear_model/plot_iris_logistic.html#sphx-glr-auto-examples-linear-model-plot-iris-logistic-py)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "# Remove GPU use\n",
+ "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n",
+ "\n",
+ "import sys\n",
+ "\n",
+ "sys.path.append(\"../\")\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "\n",
+ "from sklearn import datasets\n",
+ "from sklearn.inspection import DecisionBoundaryDisplay\n",
+ "\n",
+ "from choice_learn.models import ConditionalMNL\n",
+ "from choice_learn.data import ChoiceDataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# import some data to play with\n",
+ "iris = datasets.load_iris()\n",
+ "X = iris.data[:, :2] # we only take the first two features.\n",
+ "Y = iris.target"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We need to create a ChoiceDataset object. Features are contexts_features as they are shared by the three outcomes. The class labels are ''choices''."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dataset = ChoiceDataset(contexts_features=(X, ),\n",
+ "contexts_features_names=([\"feat_1\", \"feat_2\"], ),\n",
+ " fixed_items_features=np.ones((3, 3)),\n",
+ " choices=Y)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For the model parametrization, we specify that we want to learn one weight by outcome for each feature: 'feat_1', 'feat_2' and the intercept. This is done with the keyword \"item-full\"."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "parametrization = {\n",
+ " \"intercept\": \"item-full\",\n",
+ " \"feat_1\": \"item-full\",\n",
+ " \"feat_2\": \"item-full\"\n",
+ "}\n",
+ "\n",
+ "# Let's estimate the weights\n",
+ "model = ConditionalMNL(parameters=parametrization, optimizer=\"lbfgs\")\n",
+ "hist = model.fit(dataset, epochs=100)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's display the resulting model, just as in the sk-learn tutorial."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "feature_1, feature_2 = np.meshgrid(\n",
+ " np.linspace(X[:, 0].min() - 0.5, X[:, 0].max() + 0.5),\n",
+ " np.linspace(X[:, 1].min() - 0.5, X[:, 1].max() + 0.5)\n",
+ ")\n",
+ "grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T\n",
+ "\n",
+ "grid_dataset = ChoiceDataset(contexts_features=(grid, ),\n",
+ "contexts_features_names=([\"feat_1\", \"feat_2\"], ),\n",
+ " fixed_items_features=np.ones((3, 3)),\n",
+ " choices=np.ones(len(grid), ))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "keep_output": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjcAAAGwCAYAAABVdURTAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAC2V0lEQVR4nOzdZ3RUVReA4Xdqeu8J6SGU0HvvvUhTioAFQUD9RIoiiBURVJSiiGJDBEXpUqX3DqH3kt57nT7fj0AwZoIJZEgI51kra+ncm3P3DMlkz7nn7C0xGo1GBEEQBEEQqghpRQcgCIIgCIJQnkRyIwiCIAhClSKSG0EQBEEQqhSR3AiCIAiCUKWI5EYQBEEQhCpFJDeCIAiCIFQpIrkRBEEQBKFKkVd0AI+awWAgLi4OOzs7JBJJRYcjCIIgCEIpGI1GsrOz8fb2Riq9/9zME5fcxMXF4evrW9FhCIIgCILwAKKjo6lWrdp9z3nikhs7OzsAvtp6HCsb2wqORhAE4fFks7BfRYcgPGHytHpe2nCr8O/4/Txxyc3dW1FWNrZY2/73CyQIgiAUZ62QVXQIwhOqNEtKxIJiQRAEQRCqFJHcCIIgCIJQpYjkRhAEQRCEKkUkN4IgCIIgVCkiuREEQRAEoUp54nZLCYIgCCWz+bxrRYcgCA9NzNwIgiAIglCliORGEARBEIQqRSQ3giAIgiBUKSK5EQRBEAShShHJjSAIgiAIVYpIbgRBEARBqFLEVnBBEIQngNjiLTxJxMyNIAiCIAhVikhuBEEQBEGoUkRyIwiCIAhClSKSG0EQBEEQqhSR3AiCIAiCUKWI5EYQBEEQhCpFJDeCIAiCIFQpIrkRBEEQBKFKEcmNIAiCIAhVikhuBEEQBEGoUkRyIwiCIAhClSKSG0EQBEEQqhSR3AiCIAiCUKWI5EYQBEEQhCpFXtEBCIIgCA/O5vOuFR2CIFQ6YuZGEARBEIQqRSQ3giAIgiBUKSK5EQRBEAShShHJjSAIgiAIVYpIbgRBEARBqFJEciMIgiAIQpUitoILgiBUQmKLtyA8ODFzIwiCIAhClSKSG0EQBEEQqhSR3AiCIAiCUKWI5EYQBEEQhCpFJDeCIAiCIFQplSa5mT17NhKJhDfeeKPEc/bu3YtEIin2deXKlUcXqCAIgiAIlVql2Ap+4sQJlixZQr169Up1/tWrV7G3ty/8fzc3N3OFJgiCIAjCY6bCZ25ycnIYPnw433//PU5OTqX6Hnd3dzw9PQu/ZDJZieeq1WqysrKKfAmCIAiCUHVVeHLz6quv0rt3b7p06VLq72nYsCFeXl507tyZPXv23Pfc2bNn4+DgUPjl6+v7sCELgiAIglCJVWhys3LlSk6fPs3s2bNLdb6XlxdLlixhzZo1rF27lho1atC5c2f2799f4vdMmzaNzMzMwq/o6OjyCl8QBEEQhEqowtbcREdHM2HCBLZv346lpWWpvqdGjRrUqFGj8P9btmxJdHQ0c+fOpV27dia/x8LCAgsLi3KJWRAEQRCEyq/CZm5OnTpFUlISjRs3Ri6XI5fL2bdvHwsXLkQul6PX60s1TosWLbh+/bqZoxUEQRAE4XFRYTM3nTt35vz580Uee/HFF6lZsyZTp0697yLhfwoPD8fLy8scIQqCIAiC8BiqsOTGzs6OOnXqFHnMxsYGFxeXwsenTZtGbGwsy5YtA2D+/PkEBAQQFhaGRqNh+fLlrFmzhjVr1jzy+AVBEARBqJwqRZ2bksTHxxMVFVX4/xqNhilTphAbG4uVlRVhYWFs3ryZXr16VWCUglB15WSms2/jKs4c3INeqyEorD6dB43Ayz+ookN7bNl83rWiQxCEKk9iNBqNFR3Eo5SVlYWDgwM/7L+Eta1dRYcjCJXWzYtn+Ox/z5Ofm0O9lu2wtLbh3NH95GZl8uLbH9Pl6ZEVHeJjSSQ3gvBg8rR6hq2+TmZmZpFCvqZU6pkbQRAqRl5ONp+/
/gIevgFM+uJ7HF3dAdCoVfw2fxY/fTIdn8Dq1GrcooIjFQRBKK7Ci/gJglD5HNqylpysDCZ89m1hYgOgtLDkuTc/xDekJlt/+6ECIxQEQSiZSG4EQSjm3NH91GrcEheP4jsRpVIprXsN4PyRkotnCoIgVCSR3AiCUIxBp0epLLn4pUJpUepaVIIgCI+aSG4EQSimer1GXDp5mNzsTJPHT+7ZRvV6jR5xVIIgCKUjkhtBEIrp0H8oBqOBH2dNQ6fVFjm2Y9UyLp86SrchL1RMcIIgCP9B7JYSBKEYR1d3Xv34K76a9ioTz7Whdc8BWFpZc3r/Tm5cCKf7sFE06yzqSwmCUDmJ5EYQBJOade7JrOWb2bbyJ/ZvXIVOqyGodn0mz/uJRu26IJFIKjpEQRAEk0RyIwhCifxCa/Hye59XdBiCIAhlItbcCIIgCIJQpYjkRhAEQRCEKkUkN4IgCIIgVCkiuREEQRAEoUoRC4oFQRDKgej2LQiVh5i5EQRBEAShShHJjSAIgiAIVYpIbgRBEARBqFJEciMIgiAIQpUikhtBEARBEKoUsVtKEKqQ25fPc+X0MQBqNmpOYK26FRyRIAjCoyeSG0GoAlIT41k0/TWuhB9HaWkJgEalombDZrz6yde4eHhVcISPL7HFWxAeP+K2lCA85vJzc/hk3FCS42OY+MX3/Lj/Mj/uv8zEL74nJT6WT8YNJT83p6LDFARBeGREciMIj7n9G1eTGBPJ9MW/07RjD2RyOTK5nKYdezDt299JjIlk/8bVFR2mIAjCIyOSG0F4zB3auo5G7brg5R9U7JiXXyCN2nXh8Lb1jz4wQRCECiKSG0F4zOVmZeDu41ficXcfP3Iy0x9hRIIgCBVLJDeC8Jhz9arGzQtnSjx+88IZ3Lx9H11AgiAIFUwkN4LwmOs4YBhXz5zg7KE9xY6dPbyXq2dO0KH/0AqITBAEoWKIreCC8Jhr1qknDdp04svJY+gxbBQtuvUF4OiOTWz77UcatOlEs049KzhKQRCER0ckN4LwmJPKZEycu4RV38xl15oVbPxlMQDWtvZ0H/oiz7wyBalMVsFRCoIgPDoiuRGEKkChtODZN95h0NhJRF2/DIBf9VpYWFlVcGSCIAiPnkhuBKGc5OfmcGDTao7t3Ex+bi5e/oF0HjSCWo1bIJFIHkkMFlZWVK/X6JFcSxAEobISC4oFoRwkxkTy9tDuLJv7AZbWtgSH1Sfq2mU+fnkwP30yHYPBUNEhCoIgPDHEzI0gPCSDwcCXk0YjlUr5Yu1ePHwDADAajexdv5LvZ75FteAadB/6QoXGKQiC8KQQMzeC8JAunjhE9I0rvPze54WJDYBEIqHjgGG06tmfbb/9IGZvBEEQHhGR3AjCQ7p4/BDO7p7UbNTc5PHWPfqTGBNJakLsI45MEAThySRuSwnCQzIajUilshIXDd/dhi1mbioXm8+7VnQIgiCYiZi5EYSHFFq/MSkJsdy8eMbk8eO7tuDs4YWrp8+jDUwQBOEJJZIbQXhIDdt0xr2aHz98/DZZ6alFjp3c+zf7N66i2+DnkcnFRKkgCMKjIN5tBeEhSWUyJs39gU9eeZYJfVrRsltfnNw9uXzyCFfCj9Oscy96jxxb0WEKgiA8MURyIwjlwC+0FnNW/s3O1cvvFPHLxss/mNfnfEOzzr1E+wNBEIRHSCQ3glBOHF3deXrcJJ4eN6miQ6kwqvw80pMSsLS2wcnNo6LDEQThCSWSG0EQHlpmWgqrvpnLoa3rUOfnAVCjQVMGjJlAvZbtKzg6QRCeNJUmuZk9ezbTp09nwoQJzJ8/v8Tz9u3bx6RJk7h48SLe3t689dZbjBs37tEFKghCEZlpKXzw4gDysrPo+/x4ajVuTlpSAjtX/cqnr41k/MwFtOk14JHFI7Z4C4JQKZKbEydOsGTJEurVq3ff827fvk2vXr0YM2YMy5cv59ChQ7zyyiu4ubkxaNCgRxStIAj/tHrxF+RlZzHz1424
+/gVPt6yez++fW8iP8+eTuP2XbGysa3AKAVBeJJU+FbwnJwchg8fzvfff4+Tk9N9z/3222/x8/Nj/vz51KpVi9GjRzNq1Cjmzp37iKIVBOGf1Pn5HNyylm5DXiiS2ABIpVKG/O9t1Kp8Dm/bUEERCoLwJKrw5ObVV1+ld+/edOnS5T/PPXLkCN26dSvyWPfu3Tl58iRardbk96jVarKysop8CYJQPtKTE1Dn55XYesLFwwt3Hz8Som494sgEQXiSVWhys3LlSk6fPs3s2bNLdX5CQgIeHkV3YHh4eKDT6UhJSTH5PbNnz8bBwaHwy9fX96HjFgShwN1bTelJCSaP67QastLTsLKxe5RhCYLwhKuw5CY6OpoJEyawfPlyLC0tS/19/+7fYzQaTT5+17Rp08jMzCz8io6OfvCgBUEowsHFjZoNm7Fz9a8me2cd2rqevOxMmnXpVQHRCYLwpKqw5ObUqVMkJSXRuHFj5HI5crmcffv2sXDhQuRyOXq9vtj3eHp6kpBQ9BNiUlIScrkcFxcXk9exsLDA3t6+yJcgCOVnwJgJXD9/mm/enUBKfEHnc61Gzb6//mTpnBm06NqHakGhFRylIAhPkgrbLdW5c2fOnz9f5LEXX3yRmjVrMnXqVGQmKrq2bNmSjRs3Fnls+/btNGnSBIVCYdZ4BUEwrW6Ldrw6ayE/fvw2R/7+C49q/mSlp5GXnUmLbn0Z+/4XFR2iIAhPmApLbuzs7KhTp06Rx2xsbHBxcSl8fNq0acTGxrJs2TIAxo0bx9dff82kSZMYM2YMR44c4ccff+T3339/5PELgnBPq+79aNS2C0e2byQ+4iZWtrY069QLn6DqFR2aIAhPoEpR56Yk8fHxREVFFf5/YGAgW7ZsYeLEiSxatAhvb28WLlwoatwIVVr0zWus+34+makpuHlX45lX3sTFw6uiwyrG0tqGjv2HVnQYgiAISIx3V+Q+IbKysnBwcOCH/ZewthU7OITKS6fT8fHLg7l+9iQSqRQbewdyMtKRSKQ079qb/81eVNEhVkqiQrEgVE15Wj3DVl8nMzPzP9fPVuqZG0F4ks15ZTjXzpxg4Jg36PHsKGwdnEhLimfd9wvZtWY5VjZ2jJ4xp6LDFARBqHQqvIifIAjFpcTHcuX0Ufo+P56nx0/G1qGgerezuxejpn9Cs869OLBpNTqdroIjFQRBqHxEciMIldCmZYsxGAx0H/ZisWMSiYQew0ah1ag5sGl1BUQnCIJQuYnkRhAqoaz0NCRSKU5uniaPu3pVAyAjJelRhiUIgvBYEGtuBKES8g2pydHtG7l58QwhdRoWO37t3EkAgus0eMSRVRyxUFgQhNISMzeCUAn1fW4cCqUFfy76HN2/msLm5WSzdsl8bOwdqNeiXQVFKAiCUHmJ5EYQKiG5UsnAl9/g4vGDvP9ifw5vW8+tS+fYvfY33hnei4So24x+59OKDlMQBKFSErelBKGS6jfqNSytbVj97Rd8Pf1/hY87urgx4dPFNO3UswKjEwRBqLxEciMIlVj3oS/SfeiLXDt7isSYCPyq18I/tHZFhyUIglCpieRGEO5Dr9ez5MMpHNuxCYPBABhx8fThlZnzqV6v8SOLI7R+Y0LrP7rr3ZUSH8vO1b9y9tAedFotgbXr0fWZ56her9Ejj0UQHmcavYF9EVnsuZ1JhkqPi7WcjoEOtPWzRyGTPNCYeoORozHZ7LiZSVKuFjsLGe387ekYaI+1onjz6SeJWHMjCCXQaDSM7ViPA5tW4+7jR9fBz9G0U09SE2L58KVBbF3xQ0WHaFYXjx/izac7sWPVMgJq1qFOi7ZcO3uC91/ox/ofFlZ0eILw2MhW63l7RySLjidgIZfSxNsGgAVH43l3dxR5Wn2Zx9TqDcw6EMNnh+JQ6Qw08bbBwULGD6cTmfJ3JKl52v8epAoTMzeCUIIPXuhPXk42L78/l/ZPDUYiKfh0lZ6cyOxXnuW3+bPo0G8oVra2FRxp+cvO
SOfLKWMIrd+ENz7/Diubguc4cvL7rF0ynz+/+ZzAWnWp37pjuVxvSObc/zxnU7lcSRAevUUnEkjK1fFF9wCCnS0LH7+cnMdH+2L4/lQSE1qUrRnuinMpnEvI4/321Wjkfe89KCZLzft7ovnicByfdPEvt+fwuBEzN4Jggl6vJ/rGZVp07U2HfkMKExsAJzcPXpm5AL1ex+L33qi4IM1o/8Y/0arVvPLxgsLEBkAqlTJo7ESCwuqz7fefKjBCQXg8JOVqORqdzcj6bkUSG4BabtYMCXNhf2QWGarSt1JR6Qxsv5lB3xpORRIbgGr2Foxu5MHF5HxupanK5Tk8jkRyIwgmXD51BL1OR8vuT5k8HlCzDm7evlw9c+IRR/ZoXD51lNpNWuLg7FrsmEQioWW3vlw6daQCIhOEx8ul5DyMQBs/O5PH2/jbozMYuZaaX+oxIzPU5GoNtPUz3Rm7mY8tSpmEC0l5DxJylSCSG0EwwaA3VHQIgiAIJhkrOoDHgEhuBMGEsGatkcnlHNm+0eTxiCsXSI6LJrR+k0cc2aNRq3ELLp08QlZ6arFjRqORozs2UatRiwqITBAeL7XdrJEAh6KyTR4/FJWNXCoh1MWq1GMGOFpgo5ByMCrL5PETcTlo9EbC3K0fJOQqQSQ3gmCCTCbDN6QWR7dvYv/GVRiN9z4rZaYms/i9ichkcl6ZuaACozSfdn0Ho1Aq+WbGBFR5uYWPGwwG1n2/gJsXztDj2ZcqMEJBeDy42yhoUc2WX88lcyu96BqYqyn5rLyQQjt/exwtS7+/x1IupVuwIxuvpRMen1vkWFy2hh9OJVLbzarYGp8nicT4z3ftJ0BWVhYODg78sP8S1ram74EKAhRsBR/fpQH5Odn4htSkbou2ZKQkcXzXFgwGA89OeIdeI8ZUdJhmc+HYQb6Y9BIymZxmXXphaWVD+IGdJMZE8swrUxgwekK5XatUu6WW/F1u1xOERylbree9PVHcTlfTyMsGXwcLIjJUnEnIo6arFe93qFbmujRavYFPDsRyOj6X2m5WhLpYkZCj4XhsDp62SmZ28sXVWmGmZ1Qx8rR6hq2+TmZmJvb2ptcb3SWSG0G4D71ez5IPJnN052aM/yjiN+7DL6nRoGlFh2d2yXEx7Fz9K2cO7kan0xJUqx5dBz9f7gUFRXIjVHVqnYH9kVnsvp1JhkqHs5WCzkEOtPWzQyF7sJsoeoORI4VF/DTYKWW0D3CoskX8RHJzHyK5ER5EbnYmyXExWFpZ4+EbUGRr+INS5eeRGB2BXK7Ayz8IqazkN6PUhDiyM9JwcvPAwcXtoa9d2ZQmuSktkQQJQtVUluRGFPEThPvISEli5VezOfL3RrQaNQC+ITUZMGYCLbr2eaAxVXm5/Lnoc/Zt/JP8nIJFhm4+fvQZ+TJdnnmuSOJ0Jfw4q775nMunjgIgkUpp2KYTQ//3NtWCazzksxMEQaiaRHIjCCXITEvhw1EDUeXnMWjcJGo3aUlmago7V//KwqnjyU5Ppevg58s0pjo/n0/GP0vMzat0H/oiDdt2QZ2fy/6Nq/l5zgySYqMZPnEGAOeP7uez11/AP7QWr85aiKdfIBFXLrJl+RI+eHEg7/24Gr/qtczx1AVBEB5rIrkRhBKsXTKf3JwsZi3fjJu3b+Hjjdp1Yemn77L8y5k079oXeyfnUo+5c82v3L58ng9+XktwWIPCx+u2aEdAzTBWzPuYtr0HUS04lB8+fpvaTVrw5oJfkCsKFgYGhzWgZfen+ODF/iz7/H1mLPmz3J6vIAhCVSG2gguCCVqNmoOb19Dl6ZFFEhsoqNA7aOwkAA5uXlOmcfes/Z3mXXoVSWzu6j50FI6u7uxe9xsXjh8kOS6aZ8a/WZjY3GVta0f/l/7HpZNHiI+6XbYnJgiC8AQQyY0gmJCVlkp+bg41SijSZ+/kjJd/EIkxkWUaNzEmosRdVnKFgpC6DUmMiSQxOgKpTEZwnQYmzw2tXzBGUhmvLwiC
8CQQyY0gmGBlY4tEIiElPtbkcZ1WS3pyIjZ291+x/2/WtvYljgmQEh+LjZ091nb2GPR6MlISSzgvBgAbO4cyXV8QBOFJINbcCIIJ1nb21GvVgZ2rf6V9vyHFbg0d/nsD2RlptOjWt0zjtujWl30bV9Fv1GtY/ysxuhJ+nIgrFxj48hvUbtwSCytrtv32E8MmTC9yntFoZNvKn3Hz8SOodr0He4KPSHlu8RYEQSgtMXMjCCUYMHoCMbeuM2/Ky8Teug4U7HbatXo5P30yjeZd+5R5t1KvEWPQabXMfnU4Ny6EYzQa0et0HNu5mXmTxxBcpwEN23TG2s6ePs+NY+Mvi1n97ZfkZKYDkJYUz0+zp3N852aeHjvpvrVxBEEQnlSiiJ8g3Ef4wd189/4kstJTcXLzIC87C7Uqn9Y9BzDm3U9RWpa+2d1dty6dY+Hb40mKicLBxQ2NWkV+TjZ1W7TltU8WYefoBBT0cVr1zedsWvYdEokEeydnMlKTUSgteHbC9DJvQ68IFTFzI4r4CULVJCoU34dIboSy0mrUnNzzN7G3b2BpbU3Tjj3w8A14qDENej1nD+/l1qVzyBUK6rfuSECNMJPnZqYmc3THZrLTU3Hx8qF5l96Pzc+uSG4EQSgvokKxUGH0Oh2n9+/g5sWzSGUy6rVsT40GTculXUFp5GRlcGTbBpLjYrB1dKJlt6dw865m8tybF88Qvn8XWq2GgBphNO3UA7lCWew8hdKClt2fKtc4pTIZDdt2pmHbzv95roOLG92HvlCu1xeEyi5Xo+dAVBYJ2VpslTLa+NvhaVv891MQTBHJjVBubl48w4I3x5GSEIurpw8ajZr1PywkpG4jJs79Dic3T7Ne/++VS/l94Sz0Oj2uXj5kpCTxx9ef0mXQCJ5780Nk8oIf96z0NBZOHc+lk4exc3TG0tqGjUu/wdHVnf/N/ppajVuaNU5BEO5v+80MfjydiEZvxN1GQaZKz/JzyXQOcmBcE08UskfzYUl4fInkRigXyXHRzH5lBN4BwUya9yMBNcIwGAycP7qf72e+xZxXR/Dx8s0olBZmuf7BLev45bN36Tr4eQaOmYCDixuq/Dx2r1nBbwtmIVcqGTn5fQx6PXMnvEBSbBSTvvyBRm27IJXJiLl1jV8+fY/P/vc8M5dvolpQqFniFATh/o5EZ7PoeAJdgx0YVscVF2sFap2Bnbcy+Sk8EZlUwitNzftBSXj8id1SQrnY9tuPyGQy3v7618K1I1KplPqtOjBl/s9E37jK8V1bzHJtg8HA2iXzaNqpJy9MnVnYNdvSyppeI8bw9NhJ7PhzGVnpqZw9so8bF8KZ8Nm3NOnQvXC3UbWgUKbM/xk7Jxc2L/vOLHEKgnB/RqORlRdSaORlw6tNPXGxLijBYCGX0jvUiefqu7PjZgYpedoKjlSo7MTMjVAuju3cTJveA4vVbgEIqBFGjQZNObpjE617Dij3a0ddu0RC1G1eemeOybU9nZ8eyapvv+DUvh1cDT+Ob0hNajZqXuw8CysrOvQbwsal3/Dy+3Mf2Tqhx5GoXyOYQ1y2logMNSPruZn8/esa7MCvZ5M5Ep1N3xql7+kmPHnEzI1QLvJysnF2L3mq2NnDi/ycHLNdGyjx+naOTlhYWpGfk03+nThLSlyc3T1Rq/Ix6PVmiVUQhJLlaQt+75ytTX/utlbIsFZIydcaHmVYwmNIJDdCufD0C+LK6WMmj+l1Oq6eOYGXf5BZru3hG4BEIinx+rcvn0eVl4uXfxCe/kHcvHgGjSrf5LmXTx/DvZpf4eJjQRAeHQ9bBTIJXEzKM3k8OlNNplqPt73YNSXcn0huhHLReeCznD6wi4snDhc7tm3lT6QlxtNxwDCzXNvFw4sGbTqz4eevyUpPLXJMp9Ww8qs5uHh6U69lezr2H0pOZgYbfl5UbJybF89w5O+/6DRwuFniFATh/uwt5LTwtWP9lTTS8nVFjukNRn49m4yDhYzmPrYVFKHwuBAf
T4Vy0b7fYI7t2sJn/3uOjgOG0bRjD9SqfA5sXsOxHZvoPfJls/ZBGjn5PT4YNZB3hveix7BRBIc1ICE6gu1/LCXm5jWmzP8JmVyOp18gg195kz+/+ZzIqxdp328INnYOhB/Yyc7VywmoWYfuQ140W5yCINzfiw3cmbojksl/R9An1ImarlYk5mrZci2dW+kqprbxQSETn8uF+3ugCsUGg4EbN26QlJSEwVD03me7du3KLThzEBWKzUerUbPuh4XsXrOicAbFyz+I3iPH0nHAMLMv0E2KjeLPbz7n+M4t6LQaAOq2aMugsZMIrd+kyLkHN69l4y/fEH3jKgC2Do60f2oIg8ZNwtLK2qxxVgWVeUGxqFD8+EvO1bLiXDIHo7LRGgr+RNV1t2ZoXVfquIvfzyeVWdsvHD16lGeffZbIyEj+/a0SiQR9JV+IKZIb89NpNSTHxyKTyXDz9n3ku47ysrPISE3Gxt4BB2fXEs8zGo2kxMei02pw8fRGaWH5CKN8vInkRngU8rR60vJ12CpkOFqJGw1POrO2Xxg3bhxNmjRh8+bNeHl5ie2yQjFyhRIvv8AKu761nb3JLen/dPHkYVZ8OZO42zcwGo3YOjjSfeiL9Hl+PFLpvSlvnU7HqkWfse+vP8nPzUEqleJfsw7Pv/UhgTXrFhkzMzWZnauXc3THJlR5OXj5B9Np4LM069yryJjmcvvyef5e+TOX7yysrtWoOd2Hvkhgrbr/8Z2CUDkV7I6SVXQYwmOozO+4169f55NPPqFWrVo4Ojri4OBQ5KssFi9eTL169bC3t8fe3p6WLVuydevWEs/fu3cvEomk2NeVK1fK+jSEJ9hfS7/hk3HDSIqNov1Tg+k5fDQ2dg6s/GoO7z7Xt/BWq06jYXL/dmz8ZTHu1fzoPfJlWvXsT8SVC7w38ikObllXOGbU9ctMHdKNTcu+JaROA9r2eRqdRs3CqeNZ+PYr6HW6ksIpF7vX/saMEb25dPIIzTr1oFmnHlw6eYQZI3qze+1vZr22IAhCZVPmmZvmzZtz48YNQkJCHvri1apVY86cOYVj/fLLL/Tr14/w8HDCwkx3SAa4evVqkSkpNze3h45FeDIkx0Xz59efUbd5G96Y+33h+pohr01l+x9L+eWz91gxbyYjJ7/PwmmvkhIfw+tzvqFFt76FYwz93zTmvDqcJR9OoUnH7iiVFsybPAYnNw/eXrS8yK2wE3u2seCtcWxevoSnXnjFLM8p6tplfvxkGl2eHsnzb31UWHV52OvT+eXz9/nxk2mE1GmIX2gts1xfEAShsinVzM25c+cKv/73v/8xefJkli5dyqlTp4ocO3fuXJku3rdvX3r16kVoaCihoaHMmjULW1tbjh49et/vc3d3x9PTs/BLJhPTlkLp/PrFRyCBcR/OK7JwWCKR0H3oi9Rq3JJ9f/2JTqfj7KE9tOrRv0hiAwVFAcd+8AU6rYZV33xO+MHdJMZEMnrGnGJrfJp27EH7pwaz/Y+lZpu92f7nLzi5uvPcmx8WJjZQ0Hn8uSkf4OTqzvY/fzHLtQVBECqjUs3cNGjQAIlEUmQB8ahRowr/++6xh1lQrNfrWbVqFbm5ubRsef+uzA0bNkSlUlG7dm1mzJhBx44dSzxXrVajVqsL/z8rK+uB4hOqhluXzlKjQVMcXd1NHm/RrQ+XTx0h4sp5tBo1zbr0MnmeX/VauFfz5+KJw8gVSlw9fQgOa2Dy3Gade7Fn3e+kJMTiUc2/vJ5KoatnTtC4Q3eThQdlcjmNO3Tn0skj5X5dQRCEyqpUyc3t27fNFsD58+dp2bIlKpUKW1tb1q1bR+3atU2e6+XlxZIlS2jcuDFqtZpff/2Vzp07s3fv3hK3oM+ePZsPP/zQbPELj5/7tVa4e0wiLZgBMRpKLvNu0OuQULDuy2DQFyb4JY5ppsX3EgkYDPd/TmLdvyAIT5JS3Zby9/cv/IqMjMTHx6fIY/7+/vj4+BAZGVnmAGrUqMGZM2c4
evQo48eP5/nnn+fSpUslnjtmzBgaNWpEy5Yt+eabb+jduzdz55a8LXXatGlkZmYWfkVHR5c5RqHqqNWwOdfOniQlPrbYMaPRyMEta7F1cCKwZh2UllYc2rre5Dg3L54hJT6WBm07Eda0FWlJCVwNP27y3MPbNuBezQ9Xr2rl+VQKhTVtzYldW9Fq1MWOaTVqTuzeSljT1ma5tiAIQmVU5gXFHTt2JD4+Hnf3otP6mZmZdOzYscy3pZRKZeGC4iZNmnDixAkWLFjAd999V6rvb9GiBcuXLy/xuIWFBRYWFmWKSai6hk96l6M7N7Hw7fFMmb8Ue6eCzsIGvZ613y/g5oUzDHz5DaRSKc069eTglrXsWLWMLk+PLJx5SYmP5ZsZE1BaWDJozBtI5XJ8Q2qw5KM3eXvRctx9/ICCZGnvhj84tHVdwUJfM20H7zr4eXauXs53H07h5Xc/Q2lpBYBGlc+SmW+Rm51F18HPl2qsyly/RhAEobTKnNyUNPWempqKjY3NQwdkNBqLrJH5L+Hh4Xh5eT30dYUng6OrO6Omz+anWdN4rUdTmnTojrWdPeEHdpKenEid5m15etxkAMZ9NI+o65f5efY7bPl1CXVatCUjJYnw/buQyqRM+vJH5MqCBn6TvviBT8Y/y6T+7WjYphNO7p5cPnWM2FvX6Djg2VInFw/COyCYV2ctZNE7r3Pu8F4atesKwOn9O8jPzeXVWQvxDgg22/UFQRAqm1InNwMHDgQK1g288MILRWZD9Ho9586do1WrVmW6+PTp0+nZsye+vr5kZ2ezcuVK9u7dy7Zt24CCW0qxsbEsW7YMgPnz5xMQEEBYWBgajYbly5ezZs0a1qxZU6brCk+2jv2HElK3Eb/O/YBzR/ZhMBpwdHVn/Mz5tO09qPA8qVTKJ79vY/sfS9my4nsObVmHTCanUfuujJz8Hm7evoXnevgGMHvl3xzcvIajOzaRkhCHb0gNXpg6k9pNWpq92GWLrn0IrFmHnauXc/lUweLh9k8NpsvTI/HwDTDrtQVBECqbUic3dwv0GY1G7OzssLKyKjymVCpp0aIFY8aMKdPFExMTGTlyJPHx8Tg4OFCvXj22bdtG164Fnzzj4+OJiooqPF+j0TBlyhRiY2OxsrIiLCyMzZs306uX6R0tQsXIy8km9tZ1ZHIZftVrIVcoH3rMnKxMTu7ZhtFopFG7Lvdtq1AavsGhTJn3E8d3b0Wdn0edFm3w8Cm+k0kqldJj2Chadn+KxOgILK1sqBZSw+QtJmtbO7oOfp6ajZqjysvF3cevxF1ZZXXu8D4SYyPxq16LGg2amjzHwzeA4RNnlMv1KpP0bBVXotKxUMioF+yKvByaJqp0BiIz1EgkEOBogfI+YybmaEjL1+FoKcfL7uF/lgVBML8y95b68MMPmTJlSrncgqoIoreU+eTn5vD7wk84sHE1alU+AA4ubvQYNoq+L7zyQGtOVPl5zHllODcvnEGvL6gTI5XJ8A+tzbTFv2NrX7aq2FDQ+HX+m2M5c3B3YYNNiVSKh48fU7/+tchMR2pCHMvnzeTknm2FdWo8fAMYMGYC7fo8XWTcw9vWs2bJfOIjbhbG2bhdV0b8a5anLP5a+g0bfvyK/NycwsdsHZx4YepHtOrR/4HGvJ/KtOYmLUvFm4sP8Puuq6i1BWv5qrnZMnlwI/43qEGJs2H36y2l0RtYcS6F7TczyNMW7ISzU0rpUd2JoXVckUvvjXk9NZ+lZ5K5kJRX+FgNF0uea+AumjcKQgUwa+PMx51IbsxDnZ/PrLFDiI24Qe8RL9O4Qzc0KhUHN69h55rltO/7DGPe+7xMt2d0Oh0TercgMy2FHsNG0apHP6RSGUd3bGLL8u+xtLFl4eYjZe7i/c7wXty+fJ4O/YbQ7qnBWNvacebQHv76eRF6rZbP1+zB1cuH9OQE3nu+H0ajkb7Pv0JY05ZkpqawY/WvHN+5meETZ9B75FgAtv+xlKWfvkvjDt3oNvj5
O2tujrJx6WJ0Wg0f/fIXrl4+ZYrzz0Wfs/6nrwit34Q+z43F0y+QiCsX2fDTV8RH3GTsh18WuY1WHipLcpOZo6bd66uIT81lytDG9GweQFauhp+3XuTnrZeY+ExD5r5iuvxDScmNzmDko73RXE7J56kazrT2tUNvNHIwKptN19Jo4m3L1DY+SCUSrqTk8+7uKKrZKxlQ04VAJwtisjSsv5LG9dR83m3vS0Ovx/MDniA8rsq9cWbDhg1L/Ufp9OnTpTpPqFp2r/uN21fO8+HSDQTVrlf4ePV6jQisVZclH71J+35DSrylYsq67+eTnpzIxC++p2nHHoWPB9SsQ72W7fn45cEs/+IjRs+YU+oxT+79m9uXzzN80rv0HvFy4eN+1WvRuF1Xpj/bk8XvTeTd7/9kzXfz0Wo0fPLbFpzdCxatVwuuQViz1qyYN5OVX31Km14DkcnlrJj/MV2feY4X3v648HelWlAozTr15J3hvVi1eC7jP5pX6jg1KhWbln1Lg1YdmTL/p8LKw9WCQmnSsTvvv9CPXz57r9yTm8piwZpwbsVncvzbYdTydy58vHVdb8ICXJiy+AAv9gwjLNCl1GMejMzibGIeMzv5Us/jXmJS3cWK2m5WfHIglpNxOTTzseP7U4n4O1owq5MfFvKCGUdfBwua+tgyc180355MYHGfIKSigJAgVEqluk/Qv39/+vXrR79+/ejevTs3b97EwsKCDh060KFDBywtLbl58ybdu3c3d7xCJbV3/UqaduxRJLG5q91Tg/Go5s/e9SvLNOaedb8TUCOsSGJzV+0mLandpBXHdm4u05jrvl+ArYMT3Ye8UOyYT1B12vZ5muvnTqFRqzi0ZS1dn3muMLH5p34v/Q+pTMqBzWs4vG0DBr2BgWMnFvsQ4ODiRrehL3J0+8Yit5b+y6Zl36LTanjmlSlFWioAWFpZM2D0BPKyszhzaE+px3yc/LTlIsO71CyS2Nz16oD6eDhZ89OWi2Uac8etDOp7WBdJbO5qXs2OYGdLdt7M5Ha6ihtpKoaEuRYmNnfJpRKG1nElIUdb5HaVIAiVS6lmbt5///3C/x49ejSvv/46M2fOLHaOKJD35EqOi6Zd36dNHpNKpQSF1Sc5rmw/H6q8XKqbSGzuql6vEdfPnSrTmOnJSQTVrlfiIueQOg3YvXYFqQnxqFX5hNRpYPI8W3tHvPyCSI6NRmlpibuPb4mLnEPqNESrUZORkoSVjW2p4oy5dQ2JVEpAzToljglw+9I5GrQuuf3IXZXldlNp6PUGopNyaFbL0+RxpUJGg+pu3E4oWyuVxBwt7QNKXqMV6mLJ5eR8EnO1AFR3sSzhvILNFEk5WvAoUwiCIDwiZV7huWrVKp577rlij48YMUJsyX6C2do7khRbcvKSFBuFrYNTmcaUyRUkxpRc9ToxJspkP6X7sbSxITE6gpKWmiXFRiGVybBzckYilZIUG2XyPK1GTVpSPLaOTtjYO5KRkoQ6P9/0mHeeg00ZFj/bO7tgNBhITYgrMU4AZ8+qV+NJKpVgb6PkdnymyeNGo5GI+Cxc7E0nHyWxVcpIupO4mJKYo8VOKcNOKSv8f5Pn3RnDzkI07BWEyqrMyY2VlRUHDx4s9vjBgwextCzbm41QdbTq2Z9DW9aRmZZS7Ni1sye5eeFMmXf3NGzTiXNH9xNz82qxY8lxMZzYtYVajVuUaczOA54lMSbS5O2cvOwsdq/9De+AYGztHWjUtjM7Vi0z2dbg4Oa15GRm0Kp7P1p264sqL5e9G4rfdtNpNWz/8xfqtmiHvVPp14c89eJrSGUytq74vtgxo9HIluVLUFhY0rrnwFKP+biQSCQM61yDn7ZcJCOn+Gu/9VgEV6PTGda5RpnGbRdgz+HobBJzNMWORWWqCY/PpV2APTVdrXCzlrPhaprJJPivK2nYKaU08BQLigWhsipzcvPGG28wfvx4XnvtNZYvX87y5ct57bXXePXVV5k4caI5YhQeA92H
voDCwoJZY4dw/tgBjEYjWo2ag1vWMXfiKELqNKRRuy5lGnPE5PdRWlgya+xQju/ail6nw6DXE35gFzPHPI1UJuO5t8rWFLXniDHYOjqzcOp49qxfiUaVj9Fo5MrpY3w8dgg5WRk8/+ZHAAwYM4HE6Eg+n/Aity+fBwpq+GxZ/j0/z5lBm96D8AmqjodvAB36D2P5lx+xcelicrMLZhwir13ii4kvEX3jKoPGlu13w9nNgzrN2rD1tx/5bf4sMlKSAEiMjuDb9yZyev9OOg8ajryMM1ePi8mDG6HS6Ok+ZS0HzsViNBrJV+v4cfMFnp25jc6NfOnYsGy9uroFO+JiJee9PdGcjM1BbzCiNxg5Ep3NB3uiqWavpL2/PTKphGfruXEoKptFJxIKk6HUPC0/nE5k640MhtQpvh5HEITK44G2gv/5558sWLCAy5cvA1CrVi0mTJjA4MGDyz3A8ia2gptP7K3rfDXtVaKuX8bS2ga9XodWraZhm86MnzmvzLelACKuXOSTcUPJycpArlAikUrRqlVY2djx5sKl1GzYrMxjpiUn8v7zT5GaEIdMrkAml6NR5aO0sGT0u5/RpteAwnMvHDvIdx9OJjUhDmtbe9SqfIxGAx36DeWFqR8Vrt3RabX8+sWH7F67AgALKxvysjNxcvPg5fc+p34p1sX8m8FgYM4rw7l08jBGoxELK2tUeblIZXLa9h7I2A++KPVYj9Oam7tOX0tixMfbuBqdjp21Eo1Wj1qr5+n21fnhrS7YWZteN3W/OjeJORrmHo7jWqoKS7kEgxE0eiN13K2Z3MobZ6t7yeK2G+ksO5NMntaAtUJKvs6AUiZhSB1XBtR0NnvVaUEQihJ1bu5DJDfmZTQauRp+nBsXwpHJ5NRr2R6foOoPPe7hv//i8Lb1YDTSuEN32j81+KEbUZ4/doCdq35Fq1FTs1Fzeo142eRMiF6n48yhPcTdvoGltTWNO3QzuYMKID05kVP7tpOfk4NXQBANWndCrlA8VJzJcdGs+W4emanJePj6M/DlSYUNP0vrcUxuAAwGI7vDozlzPRkLhYyeLQII8XG87/fcL7mBgp/R62kqLiXlIZFIqOthTZCT6VvqKp2BozHZpObpcLSU0aKaHTZKsdZGECpCude5EYTSkkgk1GzUnJqNmpfruK26P0Wr7k+V23hGoxGZTI67jx86rQZ7Jxf0Wq3J5EaVn0taYhypiXFYWtuQnpxYYnLj5OZBl6dHllucAG7evoz78MtyHfNxIZVK6NLYjy6N/cptTIlEQqiLVeGup/uJylSz53YW6fla7C3kuNsoqGtiK3lFS8/X8cuZJG6lq1DKpHQKsqdHsKPZOtELQmVXqpkbZ2dnrl27hqurK05OTvedjk1LSyvXAMubmLkRMlKS+HLSaG5cCMfZwwtrWztib13Hxt6BV2d9Rf1WHQrPPbR1HT98/DY6rQbvgBCy0lPJTE2mfqsO/G/ON4/Nz9DjOnNjTveb4TEYDEzfVVDNWCmT4GGrIDlXi0pnxN9BydxuASgryZqb384ls+pSKkYjeNspydXqyVDpsVFI+bSrP74OFv89iCA8Bsp95mbevHnY2dkV/re41yw8rgx6PZ+9/jwZKUlM+2YFdZq3RSKRkBgTyS+fvceXk0fz0S9/4R9amwvHDvLNu2/Qukd/hk2YhpObJwa9npN7/2bJh2/y1duvMPXrXyv6KQlmMPtgHJdT8hlW15WnajhhrZCh1hnYdiODn8OTeG9PNHO6Fm+0+qjtupXBnxdTaepjy5jGHrjbKDAYjYTH5zLvSBxv7Yjk14EhyMUMjvCEKVVy8/zzzxf+9wsvvGCuWATB7MIP7iLiygU++Hk9ofUbFz7uUc2fiXOX8ObTndj86xJemTmfDT99TXDt+oz7aF7h9L5UJqNZ54Iu9PPfHMvNi2cIDmtQEU9FMJM8jY7T8Tl0D3FkaJ17hRkt5FL61XQmS61n7eVUUvI0uJawqPlRWXEuBU9b
BW+19kEhK/jQKZVIaOxty1ttfHh3dzTrLqfzTFjpyxAIQlVQ5nR++PDhfP/991y7ds0c8QiCWR3ftRX/GmFFEpu7FEoLOjw1hOO7NpOdmc7FE4foNGi4yXULTTp0x9HVneO7tj6KsIVHaNuNTHQG6BXiaPJ4z+qOGIyw4Ur6ow3sXzQ6A6n5OnqEOBYmNv9U190aT1sFe25nPPrgBKGClTm5sbW15YsvvqBmzZp4e3szbNgwvv32W65cuWKO+AShXKnycktskwDg4OqORqUq7APl6Opu8jypTIadozOqvFyzxClUnFytHgBHS9MT2053Hr97XkVR6QwAOFqZjlMikeBsJUejf6I2xAoC8ADJzXfffceVK1eIi4vjyy+/xMHBgQULFhAWFoaXV9UrBS9ULT6BIdw4H44q33TTw4snDuHlH4Szmwc29g5cPH7I5HnpyQnERdzAJzDEnOEKFSDMzRqAcyU0xjyXWPB4TRfrRxaTKbZKKQqphHMJpuPM1ei5kabC07Zib50JQkV44FVmdnZ2ODk54eTkhKOjI3K5HE9P043uBKGy6NB/KPl5Oaz9bl6x0vpXwo9zbMdmOg0ajlyhpEO/Iexe91ux9g8Gg4HfF85BrlDSplfVa3/wpGvkbYudUsbv55PJUhedncnXGlh2NglLuYQuQfffrWFuUqmUMHcr9kZkcjWlaF8zo9HIivMpaPVGXmjgVkERCkLFKXOdm6lTp7Jv3z7Onj1LnTp1aNeuHdOmTaNdu3Y4OjqaIURBKD/uPn48+8Y7rPhyJrevXKBDv8FY29oTfnAX+/76kxoNmtD1mYLGsP1Hv875owd4/8UBdBrwLGHNWpORksTuNSu4eeks4z+aj7Vdxf6BE8zjjZaefLI/lglbb9Mn1IlARwtisjRsvJZOSp6W/zXzrBQ1ZCa39GbcpltM3xVF92AHGnnbkqPWs/1mBheT82nta0dIKer5CEJVU+YKxVKpFDc3NyZOnEi/fv2oVauWuWIzC1HnRoCChcWbflnMjQvhQMHamk4Dh/PUC+NRWt77Y5Cbncn6Hxayd8Mf5GYV9IwKa9qafqNeo07zNhUS+z+J+jUP7r8qGZ9LyGXR8QQSc7UYAQngYi3n5cYeNK9Wed47slQ6PjkQy7XUfO4ur7FRSOkV6siIeqbXjAnC48is7RfOnj3Lvn372Lt3LwcOHEAmk9G+fXs6dOhAhw4dKn2yI5Ib4Z+y0lPRajQ4urghu08TSp1WQ2ZqChZWVg/UI8tcRHLz4P4rubkrQ6UjIVuDh60SpxIW71YGKp2BqEw1NgopPvaicJ9Q9Zi1/UL9+vWpX78+r7/+OlCQ7MyfP5/XX38dg8GAXl+xOwiE0jEYDBzdsZFdq5cTffMqFpZWNOnYgx5DX8TDN+CBxz17eC/bVy7l5sUzSGUy6rVsT49nXyKgRliR85Ljovlx1jSuhh9Hp9MiVyip06wNL70zu8QdSuUp9tZ1tv3+E+EHdqLVaAioGUaXZ56jSYfuJotUyhVKXDy9zR7XkyAiIYuv155h7f4b5ORrqeXvzMt96zK0UygyWcXf6vk3R0t5iTun7joQmcnycymk5OkAI7ZKGb2rOzG4TtGdeTqDkT23M9l+M4P4bA3WChlt/OzoHeqEi/XD9SCzlEv/s6VERr6OzdfTORCZRY5Gj4etkq7BDnQOdEDxr9f+XEIum66lcyUlH6kE6nva0CfUier/ukaeVs/fNzLYE5FNukqHk5WcDv52dA92fOA+XFq9gd23s9hxM4OEHA02Shnt/O3pVd2pUieYQuXxQI0zw8PD2bt3b+HsTVZWFg0aNKBjx458/vnn5oiz3IiZm4IqvYtmvM6Rv/+idpNW1Gnehuz0VA5tXY9GreLNBUup1bhFmcdd+dUc/vp5EQE169KkQzc0ahWHt20gPTmBV2YuoOWd3lC3Lp3lw1GDMBoNtOz2FD5B1Ym4epHju7agUFrwyW/b8PIPLO+nXSj8wC7mvzkW
WwdHWvXoj7WdHWcO7uH6uVN0fnoEo6Z98thU4X7cZm6OXoqn11vrkcmkDO9SEw8nK/aciWHXqWgGtA1m5fu9kD+iBKe0Mzf/ZcmpRLZcS8fZSk6HAHuUcilHorOJyFBTw8WSz7oFAAV/sD/eH8PZhDwae9tQy9WalDwt+yOzkEslfNTRl4ASGniWh5gsNTN2R5OvNdDe3x53GwVXU/M5GZdDLVcr3u/gi8WdlhJ/XEjht/MpBDha0LKaHVqDkYNRWSTlanmtmSedgxyBglmtd/dEk5CjZVD76tQNcuHCrVRW77uOh42Cjzv6lrhVvSRqnYGP9kVzKTmfxt621HSxIilPy/6ILCzlEmZ28hMtJZ5QZr0t5eTkRE5ODvXr1y+8FdWuXbv/vFBlIZIb+Hvlzyyb+wGvz/mG5l16Fz6uystl7hujiL5xhYWbj2JhVfqFiOEHdvH5hBcYPnEGvUa8XJgc6HU6vvtgMkd3bGLehgO4eHoztnMDZDI57/+0Bo9q90rYR9+4wkcvPY2VrS0LNx8tvyf8D9kZ6bzeuwV1mrfh9TnfoFDee5Pcs34l33/0Jq98vJA2vQaY5frl7XFKbtQaHcHPLiXIy55Nc/phb3Pvtd94+BZPv7eZWaNbMWVo8QKL5lAeyc3VlHym7oiknb89E1p4IZMW/NwbjUb+uprOT+FJDA1zYVg9N349m8xfV9N4t3016v2j+WaWWsf7e6JR6Qws6h2E1AyJtdFo5I1tEegNRmZ28isy+3E5OY8P9kbTNdiR0Y08OJeYy7u7o3m2riuDw1zu/S4bjCw+mcCuW5ks6h2Et52S2QdiuJ2jZ8+CZ6jhd+927bXodDpOWI2/jZTpbauVKdafTiey7UYGH3T0pbbbve32Gfk63tsTDcCCngGPzQcQofyUJbkp80ekX3/9ldTUVE6ePMncuXPp06fPY5PYCAVvctv/WEqLrn2KJDYAltY2jHn3U3Iy0zmy/a8yjfv3yp8JrtOA3iPHFnnTkcnlvDhtFgqlkl1rVxB+YDfZ6akMn/hOkcQGwDekJk+Pn0xqQhwRVy4++JO8j/0b/0Sv0zHm3c+KJDYAHfsPpW6Ldmz/Y6lZrv2kW3vgJvGpuXw3pUuRxAagb6sghnetwTfrz2IwPD5F534OT0IpkzC+qWdhYgMFBfT61XQm2NmSrTcy0OoNbL+ZQY8QxyKJDYC9hZxxTTyJy9YSHm+eopAXk/OJyFDzcmOPYrd1arlZ0yfUmV23MlHpDGy+lo6/g0WRxAZAJpXwcmMPbBRStl1PJylXy7GYHD4a3apIYgMQ6uvEzNGtOB6TQ2KOptRxqnUGdt7KpG8N5yKJDRQUKxzT2J3ITDXnS6hBJAh3lTm5EcnM4y03K4P4yFs07djD5HEP3wD8Qmtz/dzpMo1743w4TTp0N3nMysaWOs3bcv3caU7u3QZAkw6mr9+0Uw+MRiPHd20p0/VL6/r5cGo0aIq9k+leO0079eDG+dMYxNqxcnfsUgI1/Zyo5e9s8nj/NiFEJmYTn/r4VH2OyVLTyMsGK4Xpt9JWvnZka/TE52jJUutpUcIuq1AXS5wsZVz5V72a8nI1JR9rhZS6HqYLD7b0tSNPayA6U83VVBXNq9manBlRyqQ09rblSqqK66n5GIH+bYJNjjmgbTBG4FqqqtRxxmRpyNUaaF7N1uTxOu7W2Cqlxer6CMK/iZVZTxiptGCBn1ZT8qcpnUaDTF62hYBSmfS/x5TJ7l1fqzZ520urVgMgVzzc4soS45RK0WpLjlOr0SCVyaCCp7wfp9tNpSWTSlBr9RiNRpN/ONV32hnITfRJqqwkEgma+8w06fRGJMDdSR1tCa0QjIDOQJHZn/IklYDBaERvBLmJS2j1Ba0cZBIJUknJcRaca0QmofD2mVpj+oPA3X/Psiyh+q/XyWC88zqJW1LCf6h8WxMEs7K2sycorD4Ht6w1efz25fPE3r5O3eZtyzRuneZtObxtvckZj8zUZM4d
3U+d5m3oOGAoSCQc2rLO5DgHt6xDKpXSts+gMl2/tOq2aMf1sydJjIksdsxoNHJoy1rqNGtTKQq0VTVdmvhxOz6LwxfiTR5fseMKdYNccHeq2LYGZVHDxZIz8blk5OuKHTMYjeyJyMTJSo6XrRJ3Gzl7IzJNjnM6PpdsjZ4GnjYmjz+sBp42qHRGjsVkmzy+NyILJ0sZvg4WNPCw4UBUFnoTSVuORs+JuBzqe9oQ5m6FUiZhxU7TfQVX7LiCQiYpdnvpfnwdLHCykrMvIsvk8eOxOah0BrO9TkLVId7Bn0C9R7zM+aP7Wf/DQvS6e2/KiTGRfDNjAp5+gTRq17VMY/Z8djSJMZH8NPsdNKp7U8ZZ6aksmDoeS2tr2j81hKDa9XH39mXlwtlcPHG48Dyj0cipfTtY/+NXVAupiZu378M/URNade+Hg4sbC6eOJz05ofBxnVbDb/NncevSOXqNGGOWaz/pujf1JyzAhVGfbuda9L2O2nq9gS/+OMXGw7eY+Eyjx2qh6EuNPACYcyiWTNW93yW1zsC3JxJJyNEyJMwVmVTCUzWc2RORxbYb6Rj+sY8jIkPFNycSqOFqRQ0X8+yWCnSypL6HNUtOJXIj7d5tIqPRyO5bBVvT+4Q6o5BJ6FvDifR8HV8diy9szgmQrdbz+aFYZBIJ3YIdsbeQ0zHAgY+WHmXb8Ygi19t+IpIPfj5KhwD7/9xG/09yqYS+oU7suJXBzlsZRVqk3EpT8d3JBOq6WxPkbL5dZULV8EBbwR9nYrdUgdXffsnaJfNwdvekdtPWZKWlcP7YAVzcvZi2+De8/IPKPObeDX/ww8dTsba1o16rDmhU+Zw9tBeFhSVvLVxKaP0mAKQlJzL1mc7kZmUSWKseviGh3Lp0npibV3FwceOLdXuxtjXfuq6IqxeZ8+oIcrMyqNeyAzZ29pw7up/s9FRGTH6fns++ZLZrl1ZVvC0FcCsuk+5vruN2fCadG/vh5WzDvrMxRCVm89awxnwypvUjS27Kayv41uvpLDmViARo4m2LhVzKybgc8rQGOgfa83qLgvpIRqOR704msvVGBp62Cmq5WpGSp+N8Uh6+9ko+7Oj70LVu7idDpePDvdHcSlcT5maFu42Ca6kqYrM1dAp04LVm9xZF74/IYsGxOCxkUhp52aAzGDkVn4tMImF6O5/CRdFqnYHZB2MJj8+lYYgrdYPduHArhdPXk2ngacP0tj6F28tLy2A08vWxBHbdzsTbTkkNF0uScrVcTM4n0NGCDzqUfXu5UDWU+1bwv/4q/c6Zp556qtTnVgSR3NwTceUCu9asIObmVZSWVjTt1IPWPQdgZWN6MV9pJETdZtea5dy8cAapTE79Vh1o328I9k5FF5FqVCrWLJnHwc1rUKvysbK2pdPAZ+n74qvI71MpuLzkZGVwYONqTh/YhU6jJqBmHTo/PYJqQaFmv3ZpVNXkBiA3X8tvu66ydv8NcvM11LxTxK9JDY9HGkd5JTcAsVlqfjydxLVUFUaMeNoqea6+G/X/dfvEaDRyJSX/ThE/LdZKKW387GnjZ4fyEdT30eoNHI7O5kBkFtkaA562CroGOxLmZlUsqUzM0bDtRsadIn4S6nta0y3IsVhioTcYOR2fy+7bmaSr9ThZyOgYaE9jL9sHXkNkNBq5lFzwOiXmaLFRSmnrb09rX7tixQaFJ0e5JzelXX8gkUgqfYVikdyYX1Z6GlHXLiGTywiq3aBM9XJKkp+bw61L58BoJKBWHWzsHEo898yhPURevYizhxetew54LNfPVOXkprIoz+QGCmYxrqep0BuMBDpZYG/xaGcXTsVmczg6B2ulhGdqu2Fv+WDVgf/pRmo+f9/IQCaFfjVd8LJTlkOkgvBgyr39gsFg+O+ThCdeTlYGy7/4kMPb/kJ3Z0eStZ0D3QY/x6Cxk+7bu6kkGrWKP77+lD3rfkeVV7BFWGlpSds+TzP8jRlYWt/7ZHxg02p++fx98rLvLUb88eO3
6TbkBZ59452HfHaCYJreYOT3CylsuZZOrrbgvVIuldDWz47RjT2wfcAWBKV1KjabTw/Fof7HDqPN1zLwtFPwVY8AZLKyXz86U8VbOyLJ1xq5O+rfNzNxsJCxsGdQuSROgmBO4salUC5UebnMGjuU1IRYBr/6Jo07dEerUnFg8xo2/rKYpNgoXp31VZnWUxj0eua/OZaLJw7Re+RYWvXoh1Qq49jOTWxcupjYm9eYtvg3FEoLDmxew7cfTMY3uAb9Z3xKcFgDEqMj2Lx8CZuWfYsqL5dR0z8x4ysgPImMRiPzj8ZzKCqLvjWc6Xin/cLxmGxWX0olIkPN7C7+JdbBeViXkvKYdSAWOwsZLzR0pZGXDTkaAztuZrDtRgYvbrjFsoHVyzRmap6GN7ZFIJdKGFHfjZbV7NAZjOyLyGT9lTRe+us6KwZVR/kASZMgPCoPlNzk5uayb98+oqKi0PyrtsndhprCk2XXmuXE3rrOx8s34Vf9Xmf44aEz8K8RxjczXqfzoBFl6ll1at92zhzczVtfLaNB646Fjw8YPYGwpq354MUBHN62gfZPDWbZ5x9QLag6Hy7dUHgbzM27GmHNWvP19NfYs/53hr7+tlkXKgtPnkvJ+eyPzGJSSy/aB9y7Vdq/lgsNvGyY/Hck229m0K+m6cKFD+vTQ7EoZVI+7xaAu829xcghzp542Sr4+Uwy226k0SOk9NeftT8WvQHmdPEr0iTzuQbuVHexYs7BWL45nsgbLUUjWaHyKvPHifDwcEJCQhg2bBivvfYaH3/8MW+88QbTp09n/vz5ZghReBzs3fAnzbr0KpLY3NW6Z3+8/IPYu+GPMo8ZUrdRkcTmrtD6TajXsj1716/k4snD5GZl0O+l/xVb3yORSBg0dhJ6nY61S+aX6fqC8F923srAx05JO//iSXOAoyWtfO3YcTPDLNfW6/VkqfV0C3Yoktjc1TvUCRuFlJXnU8s0bmSmmqY+tsW6fwO0qGaLn4OSQ9Gm6+UIQmVR5uRm4sSJ9O3bl7S0NKysrDh69CiRkZE0btyYuXPFIsgnVWpCLIE165g8JpFICKhZh9SE2HIbEyCodj1SE+KIvHoJgMCadU2e5x0QjNLCkoToiDJdXxD+S1KujiAnixJvt4Y4W5KcpzXLtVPy9RiMEFxCzReFTEqAowV52rKtmTQaC+I2RSKREOpixZNVQER4HJU5uTlz5gyTJ09GJpMhk8lQq9X4+vry2WefMX36dHPEKDwG7Byd75s8JETdxq6Efk4POmZ81G3snJxx9yko+FfSuenJCWg0ahxd3Mt0fUH4Lw4WMuJzSk5e4rI1Zts15WQlQwLEZ5u+vt5gJC5HW+Yt5hIJxGeX3KIkNqv0jTAFoaKUOblRKBSFn1I8PDyIiooCwMHBofC/hSdP614DOLx1fZGqv3ddOnmE25fP06bXgDKPeeHYASKuFu8QnhB1m5N7/qZNr4E0atcVSysbNi/71mT7hy3Lv0cqkTBgzIQyXV8Q/kv7AHtupKk4n1i82WdqnpZ9EVl0CDDPOi+lTIaVQsKWG+nkmOjvdCg6m/R8HT2ql1w2wRQ3awUHorJIyi2eNF1NyedySj513B+fFhnCk6nMyU3Dhg05efIkAB07duS9995jxYoVvPHGG9Sta/q2gFD1dR/6Ila2dswcM5hT+7Zj0OtR5eexe+1vfDlpNDUbNqNhm85lGrNVj374hdZmzqsjOLBpDRq1Cp1Ww9HtG5k1bijuPr607zcEqVRKn+fHcenkEea9+TKR1wpuU6UmxPHr3A/Y/OsSwpq1wcXDyxxPXXiCNfG2pbabFbMPxLL9RgYqnQG9oaCH04zd0VgrpPSu7mS2649t7Em2Ws+MXVGcScjFYDSSo9Gz4UoaC47GYyGTMCysbDOmU1p5YzTCtJ2RHI7OQmcwotYZ2HUrgw/3RiOTwCSxmFio5MrcfuHkyZNkZ2fTsWNHkpOTef755zl48CAhISH8/PPP1K9f31yx
lgtRxM98EqMjWDRjAjfOn0YmlxfOojTr0psx7372QK93Vnoa370/ifCDu5BIpUgkEgx6PWFNWzN+5jyc3e8lLL8v+IStv/2ATqtFJpej1+mQymTUbdGONxcsfayK+YkifuZXXkX88rR6vj6WwOE7i2ylEtAbC5pqTmzpbfbCd2svpbLifAo6g/FO92+QANYKKV/3DsDZquzXPxKdxReH49AaCsaCgs7lSpmE2Z39CDGx2FgQzK3cKxRXJSK5Mb9bl85x8+IZZDI5dVu0LZcmmHERN7l88ghGjNRo0BTfkJomz1Pl57Fx6TfERdzEwcWVp158DWe3R1vW/35E0lJ5lHeF4sQcDWcT8tAbjVR3sSpxUa456PV6lp5N5mJSPkqZhCF1XGno9eBtVO5aeymVg1FZSCUSelV3pFOQ48MHKwgPqNwrFJuSlJTE1atXkUgk1KhRAzc3twcdSihHBoOBC8cOcGrfDrRqFX6htWnTeyC29o7Fzs3OSOfAptVE37iChZU1TTv2oHbTVg/duDCodj2Catd7qDH+SZWXw/Y/l3Lh6EGMRiO3r1xg2OvTsbUvvpYgOz0NmUyOhaUVCoUFWanJJpMbg17P2SP7OHNgF9o7vaXa9BqItV3lq4Nz7mYyv+28SlJGHn7udjzfozaBXmVbR/Fve8/E8MHPR4hLzcXFzpLJQxrxdIfifbX0egNbjkWw9VgEGq2eJjU9GN6lJnbWDz4bYTAY+HrtWZZuu0SuSkcNX0c+HdeGWv7Fb59k5qhZseMKp68nYamU06tFAD2aBSA10bPo911XmL8qnPQcNdXcbPl4VCta1X10t088bJV0C/nv1+VUbDbfn04iQ6VDIZPSNciB4XVdilUSNhqNnE/M42hMNiqdEX9HCzoGOmBvUbx4Xkq+nqRcHfk6A1qDhBtpaup6WCM3MVt5PTW/sLeUl62CTkEOuJbQsHNgbRcG1i7bba370d25ZXc2MQ+D0UhNFyva+tubbK6Zkqdl961M4nO02N3pLWVqe3pFU+kM7I/M4mpKPjKphIaeNjTzMd1XKypTzd7bmaSr9LhYy+kU6IC3aGlhFmWeucnKyuLVV19l5cqVhX2kZDIZQ4YMYdGiRTg4PNybrrlV5Zmb9ORE5r7xIrcvn8fDNwBbB0cirlxELpcz9sMvadG1T+G5Bzat4YdZUzEajATUCCMrI5WkmCiq12vM5Hk/Yl/GnU3mcmzHZhbNeB2dVoNPUHWkUhnRN64gk8t58e1P6DRwGFDwh2DNt1+y7seFWFrbUC0olOS4aDJSkmjetQ/jP5qH0qLgk3RqQhyfT3iBqOuX8fIPwsrWjsirF1EoLXjl4wU06dDdbM+nLDM3Gq2e0Z/vZMWOK3g4WRPoZc+VqHQyc9W8OfTBOmgbDAbavPYnxy4nYmeloIafE7fjs0jNUhHoZc+ZH4ZjeydxiUjIou/bG7gUmUZNPydsrRSEX0/GxkrBihk96NUisEzXBrgZm0Gzsb+TkavBxVqOk6WciAw1eoOREd1qsnTavdd+w8GbjJy1DZVGT7CzJXlaAzGZauoGurBxTj983Qt+fzNyVDQYtYLo5BwcLGS42yqIzlSj0hlpW8+b3fMGmbwlWd4zN6Xxvy23iM7UoJBJ8HOwIDVPS7pKj0IqYX6PAKo5WACQqdLxycFYriTnE+Bhh5ujFWdvpoDRyCtNPekYeO999sfTiWy6mg4SCHS0JFujJylXi5Vcyidd/AhyKvi5V+sMfHEkjmMxOThbyXGzlhOZqUGjN/BsXVeeCXM163OPzlTz8f4YEnK0+DtYIJfCrXQ1tkoZb7f1KbJIefWlVFacS0Ypk+LvoCQlT0dqvo5mPrZMaeVd5k7j5nIuIZfPDsWSqzUQ6GSJVm8gKlODt52CGe188bEv+F3SG4wsPpnAjjstLDztlMRlqcnWGOgT6sRLjdyRPuSHyieBWW9LDR48mDNnzvDVV1/RsmVLJBIJhw8fZsKECdSrV48///yz
1GMtXryYxYsXExERAUBYWBjvvfcePXv2LPF79u3bx6RJk7h48SLe3t689dZbjBs3rtTXrKrJjUGvZ8bIPmSlpfDqrK+o2ag5EomEzNRkls39gGM7N/PeD6sJrd+Ei8cP8cn4YbTt8zTPvvEO9k4uGI1GLhw/yKJ3XsfTN4D3f1r70DM4Dys+8jZvPdMZn8AQXvvkK6oF1wAKdkotfu8Nbl48ywc/ryekTgN2rFrGz7PfYdDYSfR+biyWVtbodTqO/P0X33/8Fq17DuDl9z5Hp9Uy/dkeqHJzeW3211Sv1xiJREJ6cgK/fPY+p/bt4MOl68t15umfypLcvL5wL0s2nuebiZ0Y2a0mCrmMPJWWhWvO8M4Ph/nilba88UyjMl1/wIyN/HXoFrNGt+L1QQ2wtlSg1elZvuMK477YTS0/J878NAK1RkeD0SvQ6Y2smNGDZrU8AYhJzubV+XvYcSKKo4uHUC+49DO2BoMB9/5L0Ki0TG7lTSMvGyQSCVlqPb+eTWb7zQw+eLEF7z7XnFNXE2n96p808bZhTCN3XKwVhR21vzwaj6uLLad/GI5CLqPWc79wKzaT15p50s7fHplUgkpnYP3lNH6/kMKQjqH89l7x95RHndx8uDea0/G59KvhxJA6rtgoZRiMRo7F5DDvSBwAfw6ugdFoZNquaFI0Bpa/25POjXyRSCQkZ+Qx9duDLPv7Mh919KWepw3brqez+GQibfzsGN3IAycreWFH7c8OxaLSGfh1QHWUcinzjsRxJDqb15p70drXDplUQp5Wz5pLaay+lMr/mnvSxUy3nfK0el7bchsbhZRJLb0JvJNwJeZo+Pp4AtdSVSzoGYCnrZLdtzJZcCyeQbWceTrMBWuFDL3ByJGYbL46Fk8zHzsmt6r4Bc2xWRombrtNLTcrXm3mVVhI8Waaii+PxKHRG/iqVxCWcim/nEli/ZU0Xm7sQZcgRxQyCRq9ga3XM/g5PIln67ky2MzJZVVQluSmzOnv5s2b+emnn+jevTv29vbY2dnRvXt3vv/+ezZv3lymsapVq8acOXM4efIkJ0+epFOnTvTr14+LF4tv/QW4ffs2vXr1om3btoSHhzN9+nRef/111qxZU9anUeWcPbyXiCsX+N+cb6jVuEVhYuLg4sYrMxfgExjCpmXfAvDX0kUE1q7Hy+/PLZyhkUgk1G3ellc/XsC1sye5dPJIhT2Xu5bNfR+AqV//WpjYAHj6BfLWV8tQKC34de4HGPR6/vp5EW16D2LQ2IlYWhV8ApTJ5bTpPZCh/5vG/o2rSEuK5/T+HcTcvMYbc5cQWr9J4evk5ObJa598jbuPL5t//e7RP9l/SUrP4/tNF/jwxRaM6hWGQl5wK8LaUsHbw5syuncdPl95Co22+BbgkuTkadh2LILRvevw9vCmWFsWvBkr5DJe7BnGzJdacjEilUu3U1m97wbXojNY93GfwsQGoJqbHX++3wtPZ2vmrw4v03P6cfNF0rPVTGjhRWNv28LX3t5CxitNPajpasW8P08DMHflKdxtFExp5Y3LnVsmEomEWm7WTG3tw6XIdP46dIsTVxK4Fp3B8/Xd6BjoUHgrwFIuZWhdVzoE2LN2/3U0Gl2ZYi1ver2eswm5NPay4cWG7tjcaaYplUho6WvH+KaeqPVGNl1N43xSHpeT81j2Tg+6NPYrfJ3cHK354a2uNAp1Z82VNABWXkyhmr2SSS29cbIqWGUgkUgIc7fm7TY+qHRG1lxKJTFHw76ILF5s6F6YAAJYK2SMrO9Ga187Vl9MxWCmJZh7bmeRnq/j3fa+hYkNFNzKm962GkqZhE3X0jEYjay6lEJLXzuea+COtaLgdZJJJbTxs+elhh4ciMy6bx2eR2Xj1TSsFVKmt61WpEJ0sLMlM9pVIzlXx/6ILHI0ejZfS+fp2i70rO6EQlbw2itlUvrVdKZPDSc2XElDrRMNqstTmZMbFxcXk7eeHBwccHIq25bHvn370qtXL0JDQwkN
DWXWrFnY2tpy9OhRk+d/++23+Pn5MX/+fGrVqsXo0aMZNWrUfSsjq9VqsrKyinxVRSf3/o1PUCih9ZsUOyaTy+nQbyin9+8kNzuL80cP0LH/MJNT9XWaFywAPrn30U/Z/9v1s6do3L4rTibWzNjYOdCqRz+irl/m9pXzpCbE0WngsybHaf/UYADCD+zm5J6/CahZ1+TMjFyhoP1Tgzm5528qep393TUuo3ubrtA8pm8dEtLyOH6leF2hkvzy9yU0OgNj+poec3TvOhiMsGDNGTYcukmrMC/qBBb/NGmhlPN8j9qsP3Cz1NcG+GnLRRwsZDT1Kb7QVSKR0D3EkcxcDdei0thw6CadAuyRm1i3EOJsSYiLJRsO3WLB6jNIgC7Bpm+Hdw9xRKs38sfe62WKtbwdjM5BbyyIx9SMaBs/eyzu/IE/FpODn7stXZv4FTtPKpUwuk8dzsTnkpmvIz1fT7dgR5PrO2q6WuFjp+RAVBbHY3OQSyV0CjT9OnULcSQ+R0t0pnmShmMx2TT0sjHZJsJKIaW9vz3HYrKJydIQl62le7CjyXHaB9ijkEk4HptjljjL4mhsDu0DHEzeIvOyU1LPw5qjsdmcTchFrTfSPcTR5Djdgx3J0Ri4lJxv5oifLGVObmbMmMGkSZOIj48vfCwhIYE333yTd99994ED0ev1rFy5ktzcXFq2bGnynCNHjtCtW7cij3Xv3p2TJ0+i1Zqu0jl79mwcHBwKv3x9H37nTmWkVuVj7+Rc4q0kOycXDHo9+TkF21XtHE030pNIJNg5OaNRVfwvmsGgx9655LU/do7OGAyGwlhLWidkZWOLQmmBRpVf+DqVOKaTC1qNGqOhYj9F5al1yKQSnO1N77hxcyhYWJmnKv2MRGaOpsj3/puTnQVSqYScfA25Ki2ujiUv3nRzsCJPXbbZkHyNDjsLWYlrCxzuLJRNy1Gh1hqwtyy567SdsuAWXa5Ki1wqwaqENRh3F99m5KjLFGt5y1QVzLA5WJrew6GQSbBWSNHojaj1BlwdrEr8XXa78++SfWc2ysHEAmMo+F12sJQVjKkzYimXlLhW5e4Yar15fu5VemOJcQLYW8pQ64yFsxclnWshl2Ill5otzrLQ6Az3fU4OlvI7z6ngg1JJ5zpYmve1f1KVOblZvHgxR48exd/fn5CQEEJCQvDz8+Pw4cN89913NGrUqPCrNM6fP4+trS0WFhaMGzeOdevWUbt2bZPnJiQk4OFR9FO8h4cHOp2OlJQUk98zbdo0MjMzC7+io6PL9oQfE77BNbh58Qy52Zkmj58/uh/3an44uXvi6OrO+WP7TZ6XmZpM5NVLRW4DVRQ7R2fOHt6HoYRE4+zhPVjb2uEdEIJMLuf80QMmz7t+7hTq/DyqhdTAN7gG18+dQpVXvKIswPkj+/AJCkUqK/lNy5QhmXNL9VVadQJd0BuM7AmPMXl8+8koJBKo5V/6bs+dGxck9jtOma4kvic8BoPBSOu63tQNdOXg+TjyS0hgtp+Moq6JWZ37qRPoSmyWhpQSei2FJ+Qil0qoF+RKDV9HziaY/jfK0+q5mqqiTqALLWp7ojUUrMUx5UxCLhKgYwOfMsVa3pr62CKVFMRjSnSmmnSVHn9HC/wdLDh/K5XENNPn7jgZhYu1Ag8bBQqppMQxczR6rqeq8LG3wM9RSbbGwI00lclz7772Xrbm2bkT4GDBucQ89AbTM6Jn4nPxd7TAy0553+d0K01FplqP/52F1xXJz8GixDi1eiPnE3Pxd1Di71gQ65mEPJPnhsfnFo4nlJ8yJzf9+/dnypQpvPPOO4wcOZKRI0fyzjvvMGXKFPr161fkqzRq1KjBmTNnOHr0KOPHj+f555/n0qVLJZ7/708zd28flPQpx8LCAnt7+yJfVVGHfkPQ6/T8Nv+TYsnAldPHOPL3X3QeNBKZTEangcM5sGk1Ny4UXTNh0OtZMe9jZHIZbfsMepThm9Tn+XEkx0ax7bcfix3b99ef
RF69ROdBw3FwcaNJxx5sXLqIlPiizTlV+Xn8tuATPHwDCGvamo4DhqFW5fPH158Wu/V04dhBju/eSpenR5j1eZVGm7re1Al0YdqSg2TlFp11iEvJYfby4/RuGVi4Y6g0mtf2wtPZmo9+OUZcStFp/ew8DVO/O4i1hZxxT9VlTJ86pGereP/nI8Vep63HIth89DYvP1W2iuSfjWuDRAI/nk4q9kfuVpqKv29k0LiGO9aWSsb3q8+R6JxiCY7RaGT5uRTUOgMv9a7D5MGNsVBI+Tk8CdW/1iwk52pZdTEVHzdb6gRVbKkKLzslVnIpG66kEZNV9N9Tqzfw4+kkpBJ4o3nBTiipBN5cfAD9vz7NH7+cwC/bLtE1yB6FXEY9D2v2RWZxManoH06D0cjS8CR0BiMvNHCjsZctbtZyfg5PKra2IyFHw/rLabTxs8PuPjMRD6NHiCMpeTrWXC7eofxgVBYXk/PpEeKIrVJGW3871l9JK7auRq0z8POZJFys5TTxfvgaPg+rR3VHzibmceRfHdKNd9YNpav0dA9xItjZkurOlvx6NrlYm4wMlY6VF1Ko52EttoSXs0pXxK9Lly4EBwfz3XfFF3W2a9eOhg0bsmDBgsLH1q1bx+DBg8nLy0OhMF2r4Z+q6m4pgL3rV/L9zLcICqtPh35DsXVw5OzhvRzaso7q9Rrz1le/oLSwRJ2fz+xXniXi6gXa9n6aui3akpmWwp51K4m8dpFXZs6ndc+y9YEylxkj+3Dr4lnqt+pAqx79kcqkHNuxmZN7/8YrIITPV+9CKpWSlhTPh6MGkZ+bTaeBwwmqXZ/EmEh2rfmVrLRU3l60gtD6jQEKd1aF1m9C+6cGY2Vrx5mDuzi0dQNhTVsxZf5PyBVle6MxR3G+U1cT6TJ5LU62Fox9qi7VqzkRfj2J7zddwEIhY//CZ/D3LFuyfvh8HJ0nrcHGUsH4/vVoWN2d6zHpLFp3joS0XH6a2pUR3WoBMH/VaSZ/c4C29bx5rnttbK0UbDpymz92X6Nn8wBWf9QbeRmbMk799gBz/ziNn4OSHiFOOFnJOJeYx86bmVgoZVz+9Xm8XW3RaPX0f+cvdp2Kpp2/HU28bcnVGth9O4vLyXl8/UZHxvcrWDf1/cbzvDJvNy7WcnpVd8LLVsn11Hy23shAZ4SDXw+mcY3i67Ye9W6pG6n5vLUjErm0YH1RbTdrUvK0bLmeTny2lg6B9rzRomAX0P6ILOYdjaN+sCsv9amLu6MVO09FsWzbZQIdLfiwQzUs5FLyNDpe3nSbPI2eDgEONPa2IUdj4O+bGdxMU9E50J7X74x5MSmPD/ZG42Ilp0eIIx62Sq6m5LP9Zgb2FjLmdPHH0co8TT4Bfj+fzMoLqTTwtKa9vwNymYSj0dkcjs6mnb89b7T0QiqRkKHSMW1nFJkqHV2DHanpakVSrpZtN9JJydPxbvtq1POwMVucpWUwGvnicByHo7Np5WtHi2p2aPQG9kZkcS4xj+H/2AEVmaHmnV2RKOVSegQ74utgQURGQUJvBOZ08Td7JeuqwOwVijMyMli9ejU3b97kzTffxNnZmdOnT+Ph4YGPz8NN/3bu3BlfX1+WLl1a7NjUqVPZuHFjkZmd8ePHc+bMGY4cKd3unqqc3ACcP3aATb8sLrxF4+LpTedBI+g1YkxhnRcAdX4+m3/9jl1rlpOenIhEIqFeqw489cJ4ajU2veapIhgMBn6e/Q6Htq4rvJVkYWlF0049GffRvCKLojNTk9nw0yL2b1xFXk4WcoWSZl160X/Ua8Vus505tIdNv3zLpZOHAXDz9qXL0yPpOfylMic2YL7Kw1ej0pm94gR/7rmGWqvHzlrJc91r8fazTfB2fbBPr6evJTLq0x1cikhDf6dkf4CnAwteb1+sds3Gw7f48o/T7D9XMCMW7O3A+H71eG1g/cIdXGX17YZzfPTLURLTC24lyaUSWoR58ccHvfB0vvdHS6PVs3DNGb5Zd4bIpIKZ
pg71fXjz2Sb0aBZQZMwNB28yadE+IhOyMQIyCdQJcmXZ9G4lztpURJ2bW2n5fLQvhky1nruTVxYyCQNruTC0btHbfBeS8lh7OY3TcTkYARcbBd2CHBhQ07nI2pkcjY7PD8VzMSkP7Z1B7S1k9K/pzKB/FeCLSFex6lIqR6Kz0RvBVimlc6ADT4e5mK17+T8djMpiw5U0rqUW3B6rZq+kd6gTPUIci6zFylLrWHMpjV23MsjWGJBJoIWvHU/Xdims21MZ6A1Gtt5IZ/O1DOLuzDTVdLWiX00nWvkW/eMbn61h9aVU9kdmodEXrIFqH+DAM7VdcDOx0FoozqzJzblz5+jSpQsODg5ERERw9epVgoKCePfdd4mMjGTZsmWlHmv69On07NkTX19fsrOzWblyJXPmzGHbtm107dqVadOmERsbWzjm7du3qVOnDmPHjmXMmDEcOXKEcePG8fvvvzNoUOluo1T15OYuVX4eOo0aazuH+/ZUMhgM5GVnolBaYmFV+ap//lNKfCxGowEXT5/7Pie9TkdeTjaW1tYolPe/j333dbKxN72LpbTM3VZBrdGRna/FwUb5wEnFv+WpNEQn5+LlZI297f1fp5x8DRqtASc7i3Krf5SSkUd6tppALwfk9ynKZjQaSc9Wo1RIsf2PPklZOWri0/Pw97DDUnn/P9YVkdzcla/VE5Wpwc1G9p+9n9Q6Axq9ERul9L6F3nQGA8m5OqwUUhxLWLh8l0ZvQKUzYKOQmdxpZW552oLkzkYhve/Pk95gJFdrwFIuQVnGWcJHyWgsiFMqoXD7ekm0eiP5Oj3WCpnJ3YBCyczafmHSpEm88MILfPbZZ9jZ3UsOevbsybPPmt6KW5LExERGjhxJfHw8Dg4O1KtXrzCxAYiPjycq6t7ix8DAQLZs2cLEiRNZtGgR3t7eLFy4sNSJzZPE0soarKxLPK7X6di/aTW71iwn5uZVlJZWNO3Yg57DR1MtqHgZ/srA1at0s4IyuRw7x9KVJfiv16mysFDKsfiPP9allZWrZvGGc/y89RLRSdm4OVoxvEtN/jeoQZGZE4ATVxKYvyqcbccj0ej0NAn1YHz/ejzTofpDJzmujta4Opb82huNRn7fdZVvN5wj/EYyFgoZvVoEMvGZhjSs7l7k3LiUHBauOcNvO6+SmpWPv4c9o3qFMa5f3WIJ0bbjESxcc4b94dFIJBLquFvRN9Sp2K2OHI2eLdfT2XUrk7R8HY6WMjoEONAn1KnYrqerKfn8dTWNM3d7Szlb0au6Iy2q2Zp8nawUMmq43v/DRFSmmr+upnEsNhe1zoCfgwXdgx3o9I96Pv8kl0pLfWtDKZNWaLLwXwnAXTKpxGS7iX+KyFDx15V0jsdmo9YbCXC0oGeIE+0D7B9Z4iaRSLBVlu45KWQSFDLzz5I96co8c+Pg4MDp06cJDg7Gzs6Os2fPEhQURGRkJDVq1EClMr0av7J4UmZu7kev07Fg6jhO7d1Og9adqNO8DVnpqRzYvIaczHQmf/kjdVu0q+gwHyuPS0PMlMx8Ok9cw43YDAZ3DKVRdXeuxaSzYscVbK2U7J4/iBAfRwB+33WV5z/5myBvB4Z3qVm45mbvmRjG9KnD4kmdzFbF2mAw8tJnO1j292U6N/KlV4sAMnM1LN9xheikbFbM6MGg9tUBuByZRpdJa1Bp9IzsVotgbwdOXE1k1d7rhAU4s/PLQTjemZn66JejfLj0GE1rejCoXQh6g5GVu65y/nYqLzV056maBbvPMvJ1vLM7iqRcLW397Al0siAmq6AQnq1SyqzOfnjc2Vm061YGXx9PINjbkRHdamJtqWD9gRscuhBPn1AnRjdyL/PrdCY+l08OxuLqYMVzPWrj5mjFzpORbD0eSXMfW95s7SM+9QMnY3OYczAWR0sZHQMdsFPKCE/I5XR8Lq397Jjc0rtCZqYE8zDrbSkPDw+2bdtGw4YNiyQ327dv56WXXqr0W61FcgObly9h5cLZTPry
Rxq26VT4uEaVz7w3x3Lj/Gm+2nq8sNKv8N8el+RmxMfb2HEyir0Lni6yjTwhLZdOb6zByc6CQ4uGEJucQ8jwpQzpGMqPb3VB9o9P+Uu3XeKlT3ew4t0eDO1knpIBd6/x6zvdebbLvQ7wWp2e5z7Zzl+HbnJ75SjcHK1o8vLvaHUGdn45EHenez+z524m02niGvq3CeaHt7qy/2wsHd9YzccvtWTaiGaF5xmNRqYtOcTnK09x6vtnaRDixtPvbmLf6UhmdfIrsoslJU/LO7uicLVWMKuzH4k5GsZvusWLvcJYPKlzkYae3/51jlfn7WFaWx9aVCv9e02+1sCYjTdpVc+HNTP7YvWPtTCbj9xm4IyNPFffjX41S18GoCrK0egZveEmdT2seau1N4p//Iweic7ms0OxjGnsQa/qZSsuK1ReZm2/0K9fPz766KPConkSiYSoqCjefvttcXvoMWA0Gtnx5y+07PZUkcQGQGlpxahpn5CXncXhresrJkDBbJLS81i97zrTRzQtVh/H09mGOWPbcPRSAqeuJvLjlgsoZFIWvt6+SGID8EKP2nRqWI1v1p01W6zfrDtLz+YBRRIbKGgV8fWEDhiNBQnQ4QvxnLmRzNxX2hZJbADqBbsxZUhjft91lbQsFYvWnaWmnxNvD29a5DyJRMLHo1vh42rLtxvOEZOczYZDtxhc26XY9lxXawUj67txISmPqEw1f9/IwMZKwbzX2hfrVD7uqXq0qO3JlusZZXruByILSvYvntS5SGID0LtlIEM6hbL1RkaFV9GuaHsjMtHoDYxr4lEksQFoeWf30uZr6U/86/SkKnNyM3fuXJKTk3F3dyc/P5/27dsTEhKCnZ0ds2bNMkeMQjnKycwgKSaKRu27mjzu5l0N/xp1uHnRfH+4hIpx5kYyWp2Bvq2CTB7v3SIAmVTCyatJnLiSSIeG1bC3Mb3Q+KnWwZy4mmSWOPV6A6euJZUYp4uDFW3qeXPiSiInryZiqZTRpXHxVgUFcQah0ui5GJHKyauJ9G0VZPIWkVwmpVeLAE5eTeT0tWQMRiPNTLSJAAofv5Gq4ka6mq5N/Qv7dP1b/zbBJRbOK8n1tHzqBbmWuM3/qTbBxGdryNY82RVtr6eqCHGxKuw99m/NfWyJydKg1ovk5klU5lVN9vb2HDx4kN27d3P69GkMBgONGjWiS5cu5ohPKGcyecE/uTrfdLVMALUqD7lcLHirapR3FnHmqkxXCM7X6NAbjCjkUhRyWbHigf+Uq9IW+7RcXqRSCXKZlLwS4oSCthOuDlYo5VL0BiNqrb7YLMfdOAEUcilKhbTE5373XIWs4DygxD+Kd8vpy6QFW85z8+8/ZlnXxsilEnJz7zPmnevdZ4PZE0Euldy32aTqTgFEmVhy80R64F+PTp06MWXKFN566y2R2DxGrG3tCK3fhAOb1picrr1xPpz4iJvUb92xAqITzKlZTQ+c7Cz4dftlk8d/3X4FqVRC1yZ+9Gjmz76zsUQkFG80q9cbWL7jCj2bB5glTolEQvdm/izfcQWDiXL916LTOXIxnh7NAuja1B+tzsDK3VdNjrXs78u4O1nRMMSN7k0DWLX3usmWEpk5ajYcvEWP5gG0ruONjaWcPbdNtzLZfTsTuRTqedjQyMuGnaeiilV8BtDpDSzffpmGnmVbu9bIy5YbcZkcu1S8KarRaGTZtkvUcrMu9Y6jqqqRlw0RGWpupRefGTMajey5nUU9D2uzJeFC5Vbqf/Vjx46xdevWIo8tW7aMwMBA3N3defnll1GrK7Y5nVA6vUe+zKWTh/lz0WdoNff+zWJuXWPRO//DJyiUBiK5qXKsLRW80r8+81eF8+v2y4WJg9FoZPuJSKYtOcSQjqH4utvxbJcaeDhZ88x7m4lKvJfg5ORrGD9vN1ej05nwdAOzxTppcCPO3kzm9YV7i8zg3IrLZPAHm/F1t+WZDtUJ8XFkQNtgpnxzgD3h9zYz6PUGftx8gW//Os//
BjbAQinn1QH1yM7T8OzMraRn3/uDmJyRxzMfbEYqlTCmTx3srJWM61ePtZfTOBCZVfghwGg0cjI2hxXnkukQ4ICTlZyOgQ7YKmUMencTscn3EpzsPA2jP91BVGI2fWuUbeFvIy8b/BwtGPHxVi5F3GtXoNboePfHI+w9G0u/GmKRbPNqdnjZKph7KI7YrHutGtQ6A0vPJHMlJZ/+T/ii6ydZqXdL9ezZkw4dOjB16lSgoOFlo0aNeOGFF6hVqxaff/45Y8eO5YMPPjBnvA9N7JYqsHHpYn5f+Al2js7UatyCrLQUroQfx8M3gGnfrMDdx/QaBsG0x2W3lE5vYNSnO1ix4wrB3g40qO7G9egMzt1KoWPDaqyf1bewLszZG8n0mrqepIx8Ojashq2Vkj3h0eSpdHw3pTMv9DDd4La8LNl4ntfm78HOWkmHhtXIylGz92ws3i42bPm0P2GBBdV3M3PU9HvnLw6ci6NhdTeCvB04fS2J2/FZjOoVxneT7+1i2nzkNkM/2oLRCJ0a+aI3GNh9OgZLpYx1H/elQ4NqQMGurBEfb2P1vht42ykJcLQgNktDZKaahp42TGvrU1gl+HpqPh8fiCVbradTI1+sLOTsOhWFWqNnQnMv2gWUvZ9dYo6GD/bFEJeloU0dL9ydbdgXHk1qtprn67sx8F+Vh59Ucdka3t8TTXKuljB3K+yUMi4k5ZGjMfBSI/cyJ5ZC5WaWreBeXl5s3LiRJk2aAPDOO++wb98+Dh48CMCqVat4//3379v0sjIQyc09cRE37xTxu4aFlRVNOvagRdc+Rdo0CKXzuCQ3UDADceRiPD9tuUhUUjZuDlaM6FaL7k39i+34yc4rqC2z9VgEGq2eJjU8GNOnTpl7Wj2o2/GZLNl4nvDryVgoZfRuEcjwLjWxsSq6iFSvN7D1eAS/7bxKSmY+AZ4FRfya1/IstoA4MS2Xn7Zc5MD5OCQS6NTQlxd61MbFoWhRPaPRyIFzccxcsIPUPC2OlgUzNfU9rYtVCs7T6tlzO4vT8TnojRDqYkm3YEdcS1jsWhoavYGDUdkcjclGozfia6+ke4gj1exF9+h/UusMHIjK4nhMTmERv+4hjqIRZRVkluTG0tKS69ev4+vrC0CbNm3o0aMHM2bMACAiIoK6deuSnZ19v2EqnEhuhLJ4nJKWsjAajZy+lkR0cg5uDla0DPMqltjcFZeSw09bL6JS6+nbKpDmtb1KHPdyZBrXotOxs1bSpq534SLmR0Gj0fH95gvEJOdSL9iFIR1DS2zTkZSex/ErCUiQ0KK2Z7HE5i6j0cj8ORtIzdfhaCkn1MWyxBYIybkadt3KRGc00sLHjhCXkisQR2WqicvWYK2QUtvN+pEW5NMbjFxOySdHo8fTRkFAJerV9CjEZ2uIylRjIZNSy82qSJ8uczMYjVxLVZGh0uFiJSfE2dJshTCrIrO0X/Dw8OD27dv4+vqi0Wg4ffo0H374YeHx7OzsUnXlFgShYu09E8MbC/dy/va99RyBnnZ88nIbBne813ojT6Wh88S1nLqWhP7O+pw5K07g5WLD5k/7US/4XkPKC7dTGP/Fbg5fjC98zN3RindGNuPVAfXN/gb+6rzd/Lz1EmqtvvCxcV/s5tOxbRh3p3s4FNzCmrBwLyt3X0N7ZzeNhULKc91r8+Wr7Yps6d5xMpKJX+/jcmR64WNetgqeb+BOS997H4zyNAbe2R1JRIa6sBnmqotpuFjJ+bCjL74O92ZabqWp+O50IleS8wsfc7FW8ExtZ3o+gmJzu25l8PuFVJL/sRsr1NWKMY3cCb1PMlYVxGdr+PZkAmcS7u0UtVNK6Xenwej9+naVh2Mx2Sw9k0Rc9r3X3tdeyUuNPGjoVfFdzquaUic3PXr04O233+bTTz9l/fr1WFtb07Zt28Lj586dIzg42CxBCoJQPvafjaXnm+sIcbbk/fbVCHa2JC5bw7oraQz7aCtanYHhXWtiMBio88JyYpJzeGtY
E57rXgs7ayUbD9/i/Z+P0PKVPzj30wiCfRy5GpVO+/+twlEh4a3W3oS5W5OWp2Pz9XQmfLWPzFwN74xs9t/BPaBRn27nl22XGdg2mElDGhPi48Dxy4nMXHaM/y3Yi0wqYUzfuqg0OrpPWcvliFRG1nOlla8dBqORA1HZLP/7EjdiMtg2dwBymZQdJyPp8/YGarla8WEH38L2C2svp/LpwVjebO1Naz97DAYDr265SaZKzzNhLnQMcEApk3A8NocV51OY/HcEi3oH4majJDJDzYw90QRXc2T1/zrRpq43UYnZfL3uLN/+fRmVzsCAWuZbS7P1ejrfnkxkSMdQ3nimIYFe9hy5mMDHy47y7u5oZnX2I8S5as7iJOdqmbYrCguZhIktvGjgaUOWRs+2GxksP5dCllrPS408zHb9I9HZfHowlkZeNrzazAtfeyW3M9SsuZTKR/uiea+9r0hwylmpb0slJyczcOBADh06hK2tLb/88gsDBgwoPN65c2datGhR6Qv5idtSQllUtdtSTV/+jfyMHGZ29EPxjwIgRqORL47EcTVTR+SfL/Hz1ou8Mm8Py6Z3Z3jXolWCb8ZmUG/UclrU9mLXvEEM/XAzB09FMbebf7Hmgb+eTWbD1XQi/xyFh3P5v3mnZanwGvg9w7rU4OepXYvMEKk0Otq89ie347NI3TiO7zddYPwXu/i8mz/V/zVLcS4xl3d3R/PHB70Y1C6Eei8uR6ZS8WEH3yK9iQxGI58ejOVGmoolfYPZeDWNn88kM7WNN618i06Tx2SpeX3LbRp42vBeB19mHYghAznHvxuGnXXR9SBTvtnPonVn+empYOz+o1Hkg8jT6nnpr1uM6F6LbyYW7QmWp9LS6pU/kOWr+LCjb7lfuzJYfCKBI9HZLOwZiKNV0c/06y+n8vOZZL7tE1TqxqNloTcYGbvxJoFOlkxr61NkhkhvMPLB3mjS83V81StQ3KL6D2Zpv+Dm5saBAwdIT08nPT29SGID9xYUC4JQOV24ncLp68kMquVSJLGBgtoyg8NcScrIZ9vxSOavDsffw45hnYv3jgr2cWRkt1ocuRhPZo6adQdu0ru6o8muyANqOSORFDThNIePfjmGTm/g3ZHNiv1hsFTKmfpsUzJy1Ow4GcnPWy7QxMe2WGIDBTVrwtytWbrlIqeuJXEpMo2na7sUa7oovfM6peTpOJeYx5brGfjYKWlpondUNXsL2vrbcz4pj0yVjpOxOUwc3KhYYgPw1rAmGI1wMKp4XaHycDQ6h3ytnukjir9O1pYKJg9tzJmE3CK3q6oKncHI3ohMelR3LJbYAPSs7oStUsruEuoaPazzSXkk5+kYHFb81pdMKuGZ2i5EZ2m4XsZK1sL9lXkllYODAzJZ8TcxZ2dnlEqxOl0QKquYO3VYAhxN77bxc7BAJpUQm5xNWpaKhqHuJS4yrh/sikarJykjD92dHSqm2CpluNsoCq9d3iISMrGykBN8p5P5vzUIcQXgYkQasck5JcYJ4O+gJDopuzDWwBLOvTtGap6WXK2e4PssCg1yskRnMJKu0mEwQv1/rFP6J3cnazycrEjJK15gsDyk5mtxtrXA1930bHWDkIK40vLNc/2KlKc1oNIZCXQ0fcvNQi7Fx05pvtc+ryBhDCxh4fbdBd2pZrr+k0qUbhSEJ4THncaSMf8oePZP8dka9AYj7k7WONhacOFWaolNBy9HpaNQyHCxt0IqkZQ4Zp5WT0qutvDa5c3H1ZZ8ta5IocF/uhSRBkCIjyMezjYlxgkQk63F09nmP1+nu487WsqxkkuJzFCX+DpFZ6mRSyU43GkNcTkqzeR5aVkqkjPycbQ0z+4yR0s5GbkaEtJyTR6/+zqZ6/oVyUouRSmTEJ1lusisVm8kIUdr1tceICbT9PVj7sRVFV/7iiSSG+GJNCRzbqm+qpIGIW7U9ndm3ZXUwt1P/7TmcipOthb0ahHI2L51uRGbwV+HbhU7Ly4lh6VbL9G4uhvO
9pb0bhHA5usZJvv8bLmWgdZgZFiX4re3ysO7zzdHJpXw+cpTxY7p9AbmrjyFnbWCPi0Deb5HbY7HZBNl4o/MtdR8ziXkMrJHbZrX8iTE24G1l1Mx/CtpMRqNrL2ciqOljPqeNnQOciAyU014QvGkITlXy97bWdRwscLJSk5DLxvm/3kalab4J/SFa8LRG4y09TNP/aCWvnbIpRK++ON0sWMarZ55f54izN0aD9uqN/uukElo42fPthsZ5Gj0xY7vup1BplpPx0AHs1y/vqcNTpYy1lxOK5YEG41G1l5KxdNWQQ3Xqr1b7VETyY0gPCEkEgmfjW/L+cR8PjkQy7XUfPQGI1GZahYcjWfHzUw+HtMKKws5E59piIeTNcM+2soXf5wiJTMflUbHqr3XafPan2h1er6d3AmAD0e1JFWl57090ZxJyEVvMJKUq2VpeBLLzyfzxjMNqeZmnsX7ns429GkVyDfrzzF27i6uRqWj0xs4cjGe3lPXc+RSPDPu7NR6vkctavo5896eaHbdyiBfayBPq2fbjXQ+2hdD0xruPNM+BKlUwqfj23IqPrdw8bDeYCQyQ828I/HsjcjiufpuKGQSnglzwU4pZfaBWP66kka2Wo9aZ2B/ZBZTd0QCML6pJwDD67pyNTqdbpPXsvdMDDq9gdvxmUxatI+Zy44zsJazyTUh5cFWKWNwmAtf/nma1+bv4XpMwet08Hwsvd5aT/j1ZIbXdTXLtSuDwWEuaHQG3tkVxcnYHLR6I6l5Wn4/n8x3JxPpEuSAn4N5iiPKpRKea+DO/sgsvjwST0R6wc/TzTQVcw7GciIulxcauJt9K/qTptS7paoKsVtKgKq3C6os/jp0iwkL9xCVdG8djKu9JR+91IqxT9UtfCwtS0Wb1/7gWkwG/3yXcLKzYO3MPrSrX63wsaOX4hn92Y4iNWFsrRRMHtKIGSObl7h2pzwYDAaGfbSV9QdvodPfmz2yUMiYNqIp7z7XvPCxpPQ8XvpsB1uORhQ+JpXAgLYhLHmzC4629/7Ard53nUlf7SM29d6sjKOljJH13egS5Fj4WIZKx9s7IknI0fLPN1M7pZQZ7apR0+3eLbmLSXksPplI9D9mj2yUMgbWdGZQbWez7pYxGo38dTWdVZdSyVbfm8HwtlcyrrEH9T2r9lbkiAwVC48lcPMfC3eVMgm9qjvxXH23YovHy9vuW5n8cjaJDNW9197FSs6oRu60MdOMXVVjlgrFVYVIbgR4spMbKGhXsOdMDFGJ2bg5WtGtiR8WStOzBhduJfPtX+dRaQ30bhHAgLYhJs+729bhanQ69tZKujX1N7kzyFwyclTMXXmauNQcwgJcmDCoIfISqs/eiM3g8IV4JBJoV8+nxHYSOr2B2R+vL2y/UN/TpthOs7tupav4+0YGeoORpj62NDexgwoKXqdLyfnE52iwUcho6GWD5SOskqvWGQhPyCVHrcfDVkGYe/F2ElWV0WjkRpqqoEKxXEoDTxuTu/zMRas3cjYxl4x8Hc7Wcup72Jg9qapKzFKhWBCE+zMYjOw8FcXqfdfJztMSWs2Rl3qH4edR+T6VXY1OZ/uJyMLkxs3RymQfJoA6QW58/Uan/xzz+00XmPrtAXJVOqRSCe0bVGPDzL5YWj7420xyRh5Lt14i/EYyFgoZvVoE0r9NEAp58T9IjraWfDy6VanGDfFxJKSEHVb/dDkyjdPxOYXtFxwsZSa3kmv1BqIy1ah0BgxGSMzRkqPRm/zDKZFICHO3JszdPIus/4uFXEqLEhKvqk4ikVDdxcrkv+GjoJBJaOJtWyHXftKImRvhiVTeMzdpWSr6Tf+LwxfjqeHrhI+rDSevJZGTr2XOy62ZPKRxuV7vQRkMRiZ+vY+v153F3cmKOgEu3IzLJDIxmwFtg1k+oweWJczg3I/XwCUkpedjIZNQ3cWSlDwdCTlaZBLYPKcfXZsFlHnMP3ZfY9Sn2zEaoUVtT7LyNIRfT6aGrxObP+1HoJd5
FoBCwczWawv2sGTjBZwsZfg6WBCXrSElT0dbPzsmtPAunMGJylQzc180Sbk6QpwtUcokXEvNRy6V8mYrb5r4iD9mglAexMyNIDxiQz/cUjAbMncAnRr5IpFIyMnXMPOX47z17UF83e2K9G2qKJ+vPMmi9WeZ92o7xvWrh1Ihw2AwsmrvdUZ9up0JC/fx3ZTOZRqz0ZgVJKXnM7CWM4PDXLFSSDEajZxLzOPTg7H0nrYBza4JZRrz6KV4Rs7axuCOoSz4X/vCxpbh15MY8sEWek1dz9kfR5itMefHvx7nh00XGNfEg67BjsilEvQGIwcis/jqeAL2Z5J4ubEH+VoDH+yNxlYh4+te9/pIZeTr+OZEAnMOxvJF9wD871NfRxCE8id2SwnCQzpxJYFdp6NZ8mYXOjf2K7y1Y2ulZM7Y1nRv5s9nv58ssRbKo6LW6Ji3Kpzx/erx+tMNCxMDqVTCkE6hfPxSK5Zuu1RiLRRTdDodF26m0NDThufqu2GlKHhLkUgk1Pe04fUWXugN8PLn28sU6xd/nCbU14ml07oV6djdsLo7qz/qzbXoDNYduFmmMUsrT6VlwepwnqpR0MzybsdumVRCh0AHhtZxYfuNDLLUevZFZpKer+Oddj5FGmQ6Wsl5s7U3Dv9v787DoqzaP4B/ZxgYBpBBkVV2FETclxR3xd1cyjTNcqmfbaamuYQt+pZlpvVamZa+hZqlLbhgbqmJmKICKpgigqwiqyzDPjDz/P5Ap5BBQYEZxu/nuuZKnvPMmXtmoLnnPOec29QI+69r39uGiBoPkxuiR/T7mUTYWMkwzs+9RptIJMJLY3xxMS4baTmNs0tvXZ2LyUR2fileHOOrtX32GF9UqtQ4fD65zn3uPH4dKgEY7inXOl+nl6MFLEzE+CUkvs59CoKA38MSMXOkDyRGNf8X1dnTBr3a22H/mZp78DSEvy7fQkGxEsM8tF/2Gu5phQq1gEsZxTifVoTOdtr3hzE2EmOwmxznb+r2fSd6HDG5IXpEZUoVLM1MYKTlgxgAWrYw1ZynS3c3j7sbz70szUxgJBZp3WSuNtn5JQBQ64oTI7EIMokYKlXdR63UagHKChWsaokTAKwspPWKsz7uvk+1PSfzOyNeFSo1KlQCzO+z2sbCRAxlPZ47ETUMJjdEj6izZ2vcuFWA66l5WtsPnUtCK0tTONvodmJpR/eqQpCHziVpbT8akQKVWqi1/pE2U4Z4QywCLqRrv5SVplAiu6QSbg51XzFmZCRGJw9rHD6vPc6ConKc+Tu9XnHWR2fP1hCJgMh07SMuF9Lv1p4yhbuVFJczS6BU1dydGQAibxXDvSXn2xA1NSY3RI9o0qC2sLGSYcFXJ2uMJkTGZmLz/suYPbpDrfvINBXH1hYY388DH+84j6SM6rWY8grLELD5L3Rta4M+Hezr3KeTbQuYmkhwMC4P8fdUNVaq1NgcmQGxCNi/ely9Yn1lfGcEn07A72HVLz2p1QKWfHMKykoVXhqr/fLao3Kzt8SoXq745UouckqqV8lWlFfih6hseFubwqOVKUa2bYnCchV+jM6pMacqNEmBy1klGN22ZaPESUS141Jweiw19FLwY5EpmLA8GG1aW+Clsb5wbG2Bk5duYufxWHT2aI2jnz8NC5nu6/bcyinCoAW/4XZBKWaN7oDuXna4npqH7w78DWWlGn/+dxI6edRvG/7oG9noOecniETAIFc5OtmZ4XZpJY7E5yG7uBLDe7ng0KdP1avPSpUaU1YexP4zCXhqgCfG9nFHQVE5th2JQdSNbPxv6XDMGtWhXn3WR0qmAgPn/Yp8RSn83eVwt5IiVaHE0Rv5AICPh7nAybJqRGZ/bC7+dyELXtamGOwmh1QiwrmbRQhPK8JgN0vM7+Pw2GySR9SYuEPxfTC5IaBxdiiOis/G2l2RCAqNh7JCBWdbC7w8rhMWTOoGc5lxgz/ew8rOL8FnP19A4KGryCkohYXMGNOHt8eSqT0eeu+Y
q4k5GLjgNyiKyqESABGq5tvMfaoLPn9j0EP1WalSY3PwZWzcF42Y5FyIxSKMesIVi5/tgUFdnR7cwSPKyC3GZz9fwJa9l1CoVMPMWIzBbpZ42scaNubV388Lt4qwLzYXURklEAC4W0kxxqslhnnImdgQNRAmN/fB5IaAxi2/oFYLUFaqHmozvIZw90/6QXWKBEFAmVIFUxOjOp0rCKhTjaiEW3lwaGkBWQMmdOXKSkiMxLVO2v43tVpo0FpW+789DKVKgImR6IGvk0otQC2g1hINRACgFgQmvQ+Bm/jRY0sfakaJxSKdJDbBpxPwZdBFhEalAQAGdmmDeU93xYT+nlrPF4lEkEnvH+ep6DR8/ssFHD6fjIpKFXp42eK1iV0wY4RPtQSitLwSG/dGYcvvfyPuZj4sZMZ4ZlA7vPVsd3Rws37k5/ag+UpJGQp8/ssF/PhHDPKLlXBqbY6XnuyI+ZO6VSuG+TBEIhGkkrp9EBmJRWi6SkXUnNwuqUBwbB5OJBagoFyFlqZGGOoux4T2rSB/hBIlpB1fUSIDsDIwDB9uP49+HR3w+dyBAIBfQ+Lw9Hu/470ZT2DlbL969xl46ArmrD2Gju7WWPWSH8xlxjgQloiX1hzFiQupCHx7BMRiEUrKKjBqyR6Ex2Zh8uB2WDy1B27lFCHw0FX8EnId+1dPwOBGvIwUfSMb/guDIFSq4O9mCYcWJoi7XYY1P4bjlz+vI+TLyWgt100tISIASC9UYvnxFChVavi7y+EslyIxvxyH4vNxKqUQq4e5oLWZ/ly6NgS8LEUGRR9Gbpra6cu3MHD+r/jo//ri7em9qrWt+Skcy7ecQeiXk9Gvk2Od+0zJVKDd9G2YPboDNi4cWm2UZtefsZj+4WFsDRiBF0b4YPmW0/hq9yUc/exp9OngoDmvpKwCE9/Zj78TbyNx1+xGWS0mCAI6z96BssISfDjEudreNDcV5Vh+PBXjBrTFtuUjH/oxft98pCFCpcfY8mPJyCurxMf+rmgp++fvILu4AgHHkuFqJcV7g5x1GGHzUJ/LUlwKTtTMbdoXjXZOVlg6rWeNtiVTe6KdkxU27YuuV5//O3AFMqkE614fUGP+ytSh3hje0wXf7IuGskKF//3+N15+slO1xAYAzEyN8cX8wcjMK8HuRiqVcCr6Fq4m52J2F5sam+45WUoxsX1L/HLiOnIKShvl8YkeJDm/HFeyS/F8Z5tqiQ0A2JgbY2rH1oi8VYzMIqWOIjRMTG6ImrmLcVkY3dtN6yRasViE0b3dcDEuq159XriehUFd29S6fP1JP3dEXs9CalYhbivKMKaPm9bzfFxbwdNRXu/Hr6uLcVmQSsToZGemtb2XowWUlWpcTWJ9J9KNG3lV+z/1dNS+iWfPNhYQACTmlzdhVIaPyQ1RM2ciMYKiuPZvfYpiJaT1rJ4tNTFC4X36LLjT593imwW1nKtWCygsrf/j15WJRIxKtYDyWkoclFRU7RzcWI9P9CDGd750lFZo38X67u+ocQOu8CMmN0TN3lg/d+w+FQ9Fcc1vforicgSFxmOslqKe9+2zjztCo9MQn5Zfo61SpcaOozF40s8dTjYW6OzRGlsPX9Xaz8FzicjKK63349fVqN5uUAsCTiYptLYfTyiAnZUM3b0ap1QD0YN0sTeDsViEY4kFWtuPJxRAJhGjg4320Ud6OFwtRc3G4zhZuC5endAJG/ZEYdJ7v2Pb8pFwbF01/H0rpwgzP66aDPvyuE716nPqUC98sO0sJr33O379z1h4OVeVEMgrLMOCr07iRloBti8fCZFIhCXTeuCFj45gxfdheHt6L83y8tOXb+HltccxoLMjevvUvaRDfbg7yDFlcDtsPZ0Aa5kEPRzNIRKJUKkWcDg+D0du5GPNK/1hLOHIDemGpVSCYZ5y7LqcA3sLY/R1bgGxSASVuiop3xNzGxPbt4LMmGMNDYnJDVEz52TTAsEfj8fT7+2H+9RADOhctSrqVPQtWMiM
EfzxeDjb1m9loJmpMQ59+hTGLNsLnxnb0dfXAeYyY/x1+RbUagE/vDMKvdpXJSzPDWuP5AwF3v0uDBv3RaO3jz1u5RQh6kYOerW3wy8rxz5w87tHsXnJMDyj+B0fhqbCWS6FrbkESflK3C6pwBtPdcGiKd0b7bGJ6uKlbrbIL1Nh7elbsLcwRhtLE6TklyO7pBKD3SwxvTNHFhsal4JTs8GRm/srKCrHjqPXcDLqJgBgYOc2eGGED+SPsIldaXklfg2Jw6FzSVBWqtDTyxYvjvGFXSvzGufG3czD9wevIDY1H5ZmJnhmcFuMfsKtTrsKPypBEHDiYlUtr5yCUrjZW2L26A7o3ACVw7kUnBqCIAi4llOKE0kK5JdWopWZBP7ucrSz5h5MdcUdiumxJQgCwq6kIylDAWtLGYZ0c9JMen0Ul+KzEZOcCwuZMYZ2c9arWlF3yS2kmNjfE7Ytq67d9/V1qDWxUasFnP77FlKzCmFjJcPgrk5aL93IpBI86ecOC5kxlHd2KNaW2ABAO6eWWP1y/4Z7QvUgEokwtLszhnZv2L1CKivV2B+bizSFErbmxnjSqyVMJPp5+SCloBxJeeUwkYjQydYM5ia8FKdPRCIRfGzM4MO5NU2CyQ0ZjOORKXhj/Qlcv5mvOWZrJcMHL/XFnCc7PlSfUfHZePWz4zh/LVNzTG5ugkVTumP58080aA2jR5FfVI7X//snfguJg0pdNRhrJBZh0qB22LRoaLUSBIfPJ+HNr04i7l+vk2Nrc3z4Ut9qlbbLlZVYvPEUvj94BWUVKs3x4T2csXnJMLjY3f+bU3P33ndnsG5XJJSV/6xy+fFyDoZ7yPFqr8aZQ/QwbhUqseFcOq5k/7OXj6lEhDHtWuL5zjYw0pPfUaKmpNPkZvXq1di9ezeuXbsGmUyGvn37Ys2aNfD29q71PiEhIRgyZEiN4zExMWjfvn1jhkt67OSlmxi7bB98Wpti1VBneFnLkF6oxL7YXLz62XFUVKjw+lNd6tVnbEoehi4MgqtdC+z9aByGdHNCZm4Jvgm+jJVbzyK/qBzrXh/YSM+o7sqUlRi1ZA/i0/Kx/o1BmDLUCwDw64nreD/wLEYt2YOQL56BqYkERyOSMT4gGEO6O2PLkmHo3s4WcWn5WLcrEi+tOQqVSo2XxnaEIAh47sNDOBiWiCm+1hjqLoepRIzwW0X46XImBs3/Fee/nQYbK8P8Froy8Cw+3hGO7g7mmOJrDY+WpkhVKBF09TYOxedDAPCaHiQ42cUVWH4sGTJjIyzt54huDuYoVqrxx418BF29jfwyFRb0cXhwR0QGRqfjqydPnsTcuXNx9uxZHD16FJWVlRgxYgSKi4sfeN/Y2Fikp6drbu3atWuCiElfLfv2L3i2kmLFYGd0sjOHVCKGW0tTLOjjiJFtrfDO/06juLSiXn1+sO0srCykOLH+GYzr6wELmQk821hh7WsDsHpOP6z/7SKSMrQvQW5KO4/HIiI2E4fXPoXXn+qC1nIZWstleG1iFxz+dCIiYjOx83gsBEHA0k1/oX8nRxz4ZAIGdG4Dc5kxura1wQ/vjMSMkT5YvuUMypSVCI1Kw96/EvBmHwdM9m0NazNjmJsYYbCbHB8NdUZOfim+2h2l66feKNRqNdbuDEdXezO8O9AJPjZmkErEaNvKFEv7OaK/SwscSyhAWaX2fUua0u6Y21ALwOphLujnYgkzYyPYmBtjemcbvNrLHn8mFiDhziZyRI8TnSY3hw8fxqxZs+Dr64suXbogMDAQKSkpiIyMfOB9bW1tYW9vr7kZGWm/vlxeXg6FQlHtRoYlNiUP4dcyMdG7FSRahuAn+bSCoqQCwWcS6txncWkFgkLjMfepLlrnrbw+sQssZCb48ei1R4q9IfxwJAbDe7qip7ddjbYe3nYY3tMVPxyJQfSNHEQn5GDJtJ6Q3DPJVyQSIWB6L+QUlOLQuSRsPxIDJ7kUfZ1rTrq3MTfGINcW2HrwSqM9J13a
eigGZRVqTOpgXeOSjkgkwuQO1qhUC9gbc1tHEVZRCwJCkhQY7mkFKy1VpYe6y9FSJsGJWvZXITJkejUzrqCg6o+wVatWDzy3W7ducHBwgL+/P06cOFHreatXr4ZcLtfcnJ1ZnMzQpOdWjfS5WmmfPGtnYQKZsRgZtx88InhXbmEZKirV6OCm/XfRXGYMd3tLpNejz8aSnluMju7WtbZ3dLdGem6x5nWq7Vwv55aQGFW9Tum3i+DUwrjWJdwuciky8koePXg9FHdn40JXufbfJ+c7x7OK6zcS2NCUKgElFWq4yLWXyJCIRWjTwgR5pZVNHBmR7ulNciMIAhYtWoT+/fujY8faJ386ODhg8+bNCAoKwu7du+Ht7Q1/f3+EhoZqPT8gIAAFBQWaW2pqamM9BdIRR+uq1TvJtdRmySxSorRCDYfW2lf5aNOqhSlMjI1wJVH7t/OiUiUSMxRwrEefjcWhlTkuJ+TU2n45IQeO1haa16m2c2NT8lCpUsOhtQUcW1sgVVGB2naKSMkvh0Mrw5xv4+VsBaD236eUgqrj9hbak4qmYmIkgrmxuNY4K9UC0hTlaCXjuhF6/OhNcvPGG28gOjoaO3fuvO953t7emDNnDrp37w4/Pz9s3LgRY8eOxbp12vdAkUqlsLS0rHYjw+Ll3BK929thz7VcVNxTY0gQBPx29Tbk5iYY39ejzn2ay4wxaWBbfL0nCvlFNT88vt4TheKyCkwfpvtJ7DNGdcDRiBScj8mo0RZ+LQNHI1LwwkgfdPJoja5tbbB2ZyQqVdXniwiCgI93nIeNlQyjn3DFjJEdkKYox+nUwhp9ZhVXICRZgVmjfRvtOenSzJE+MDU2wm9Xb2tWnt0lCAJ+uXIbErEIE9s/eIS5MYlFIgx2s8TRhAKtozPHEwqQV6bCUHe5DqIj0i29SG7mzZuH4OBgnDhxAk5OTvW+f58+fRAXF9cIkVFzsea1AUjML8eKkFREZRSjpEKFhNwy/DcsHX/cKMBHc/rBzLR+e9OsmNUbihIlBi/4FXtOxaOgqByxKXlYuOEklm85g4WTu8HVXvfJ8rShXujtY4/RS/fiq6BLyMwtRmZuMTbsvoRRS/ait489pg31gkgkwqev9seZK+kYs3QvQi7dhKK4HBGxmXjuw8PYcfQaVr/cD1ITCQZ0dsTTAzyx/mw6dv2dg+ziChSWq/BnQgHe+TMFdq3MMW9SV10/9UYhFovx9vO9EJVZgg9OpuJKVglKKlS4frsUn/yVhjOphRjhKdeL/W7uzgsKOJaM0CQFipQqZBQp8UNUNr6JyMAwDzncWprqOkyiJqfTHYoFQcC8efOwZ88ehISEPPSKp2eeeQa5ubn4888/H3gudyhuvh60Q/GJi6mY98UJxCTnaY45tDLDBy/1xYtjHm6U4XJCDl77/E+EXUnXHGvZQoq3pnTH29N7NWpZgfooKCrHG1+cwC8n4jSjMhIjMaYMaYcNC4ZUmxT9R3gyFm44iWsp/7xOzrYW+PClvnhhhI/mmLJChWXf/IUtv19GqfKffW5G9XLFt0v84WRj2H8/KwPP4tOd4Sj/VzVnY7EII9taYU6PmpO3dSW9UImN4RmIzvxnDpRMIsZYr5Z4rlNr7nNDBqM+OxTrNLl5/fXX8dNPP2Hfvn3V9raRy+WQyaq2pA4ICEBaWhq2b98OAFi/fj3c3Nzg6+sLpVKJHTt24JNPPkFQUBCefvrpBz4mk5vmqy7lFwRBwLmYDCRnFMJabopBXdo0SNHEywk5uJp0Z4fi7s6a4pD6Jv12Mc78fQsA0LejIxystc8JuruTc0pWIWytzDCwS5saK6juyi8qR8ilm1BWVO1Q7NnGqrHC1zuVlWrMD/gZNxUVsDOXYHRb/d2h+KaiHEn55TAWi9DJzgxmDbAzN5E+aTblFzZt2gQAGDx4cLXjgYGBmDVrFgAgPT0dKSkpmjalUonFixcjLS0NMpkMvr6+OHDgAMaMGdNUYZMeuxSfjZ+O
xd4pv2AKtVrAsB4uj7yTcCeP1ujk0bqBomwciekFeGtjKM7HVO2m/ISPHT57fSDcHWrOuRCJROjb0RF969Cv1Z2yDo8jiUSMMe10O7emrpwspXCyfPg6YkSGhIUzSecaoiCmWi1g/pch2LQvGm1aW6Cblw1upBUgJjkXQ7s5YfeqcWhhptvVLY1p/W8XsXTTKQDAoK5V89ZOXqoqoPnpawPw5jPddBbb44IFNokaV7MZuSFqKOt+jsQ3wdH4av5gvDy+EyRGYgiCgCPhyZj6n0OYs/YYdq0wzNG9c1fTsXTTKQzo3AY/vjcK9ncKW2bkFuP5VYexdNMp9Paxg5+vo44jJSJqGvp58ZioHpQVKqz/9SJeHtcJrz/VRTN3RCQSYdQTblj72gD8djIOiemGuVPrW1+HwtREgj2rntQkNgBg38ocuz98EqYmEizZeEqHERIRNS0mN9TsXYjLQmZeCWaO7KC1ffowbxiJxTh8PrmJI2salxNvY/LgdrA0rznfwtJcismD2+FyLZsREhEZIiY31Owp7yzVtTTXPqdGJpXAWCKGskKltb25EwSh1ucOVL0uj9nUOiJ6zDG5oWbP160VpMZGOBCWqLX9zwupKC2vRHcv2yaOrGnYWMkQfDoBanXNBEatFhB8OgE2VjIdREZEpBtMbqjZs5bL8OxQL3y6MwIxybnV2m4XlGLJplPo5GGN/p0Mc0Lt28/1QlKGAqt/DK/R9slP4UjKUGDptJ46iIyISDe4WooMwrrXBuDC9Sz0emUnpvl7o6e3HRJuFWDr4asAgGOfP603uwk3tDnjOmHXn9fx/vdh2H8mAdP8qzbE3HU8FuevZWJwVye8Mr6zjqMkImo63OeGGk1D7F9TH4ricny1OwrfH7yC5EwFrC1leG6YNxZO7gYXO93XgGpsKwPPYuO+KOQqygBUVTZ/bWJn/Ge2n44jezxwnxuixtVsyi/oApObh6eqrESFshxSmVmdRkGaOrn5N0EQ6hRjRaUKFZVqyKQSgxnZKSlTAgDMTA1308KmpFYLKCmvgJnU+L47XTO5IWpc3MSPGlTK9RgEb/0a548fQmWFElatbTHkqWkY+8IrepsgPihR+fNCKtbuisTRiGQIAuDlbIXXxnfGaxM7N0gtqqYmCAJ2/XkdX/52Eeev3Sm/0N4O85/phql3KoJT/aTfLsanOyOw/UgM8ovKYSEzxnPDvLHsuV5w04Nq8ERUOyY3dF9XI8Lw6fwZaNnaDs+89has7Rxw/VIEDu7Yggsnj+LdLb/AvEXN2kX67LsDf+OVz46jeztbfDFvMCzNTXDoXBIWbzqF4xdTEfTBk7UWkdRXy779C5/9fAHDerpg82J/AMCvIXF4ftVhXIzLwqevDtBxhM1LUoYCg+b/itLySsx5siO6tLXBteRc/O/A39gdegMhXzwDH9fmUXOK6HHEy1JUq8oKJeaP9UMbDy8sWf89TEz/WU6cGn8N/3npGfQbPRGz316l9f66vCxVm7TsInhMC8SLYzrg6zeHVrvMcOhcEiYsD8YX8wbhtYlddBhl/YRcugn/hUH4fO5ALLinhtSXv13Ewq9Dcfy/kzD4Ts0perCxb+/FteQ8nPpqMhxbW2iO3y4oxdCFQTCTShC2aWq1+/CyFFHjqs9lqeb19ZSaVOTJo8jPycKMxSuqJTYA4Ny2PUZOnYVTv/+GspJiHUVYf98fvAJTEyOseaV/jfkTo3u7YUJ/T3wTfFlH0T2cb/ZFw8e1FeZP6lqjbd6krvBxbYVv9kU3fWDNVGJ6AQ6fS8b7M3tXS2yAqm0HVv1fX5y/lokL17N0FCERPQiTG6pV8vWraGXnAOe27bW2d+k7GGUlxci+ldrEkT28qBvZ8PN10FqqAABGPuGKvxNvQ6VSN3FkDy/6Rg5G9HLROq9GJBJhRC8XXE7I0UFkzVP0jarXauQTrlrbR905Hs3XlEhvMbmhWplITVFaXITKigqt7UUF+QAAYxPtiYI+kkkl
yCssq7U9T1EGE2Oj+66K0TcyqQS5ivJa23MV5TA14fS6upJJq16ru0vq73X3uKlJ85t4TvS4YHJDteo+cBhKiwpx7tjvWttP7N0JJ08v2Dm7NW1gj2BcXw9ExGYhKj67RlulSo2th69ifF+PZrW6aFxfd+w5Fa/1wzhXUYbdofEY389DB5E1T/07OaJlCym+O3BFa/t3dy5tDu/h0sSREVFdMbmhWrm080H3gcMQuPpdXDx1XFN8saykGLu++gSRIX9g/Ky5zSoRmNjfE97OLfHMigOIiM3UHM/OL8GMj48gPq0Ai6Z012GE9ffK+M4wlogxYXkwEm4VaI4n3CrAxHeCYWIsxsvjOukwwubFzNQYCyZ1wxdBF/H1nihNwdW7ye8H287h5XGdYC1nvS4ifcXVUnRfJYUK/Hfxy7gSfhp2Tq5oZeeApNgrKCspxrNzl2L87Lm13lcfV0sBVct8n3x7H2KSc9HJwxpWFlKci8mEkViEwLdHYPLgdroOsd7OXc3AxHeDkZ1fil7edgCA8NhM2FjJsHfVePTuYK/jCJsXtVrA/C9DsGlfNGxbytDB1RrXb+bhVk4xpvl7I/Dt4TX2Q+JqKaLGxR2K74PJTf0JgoBrF84i7I/9KCkqhL2TGwZNeBY2jvdfWqyvyQ1Q9S18/5kEHAhLRHmFCt3a2WLmSJ9m/W28pKwCu/68jtCoNADAgM6OmObvDTNTYx1H1nxdSbyN7UdicDO7EHatzPD8cJ9aq8szuSFqXExu7oPJTdPR5+SGmo+fjl3D0YgUmEolmDuhEzp62Dxyn9dT83D671sQiUQY1KUN3B0efSNKJjdEjYvlF4io2Tt8LgnT/nMQitJ/VuttCb4Mb5eWOP31FFhZmNa7z4zcYrz06VEcPpesOSYSVc3F2rx4GFpZ1r9PItI/TG6o3jgiQ40t/FoGJiwPhq25BG8MaIMejhYoqVDjeEI+dkTnoOPMHUj59UWIxXVfE1FUqsTwRbuRV1SOrQEj8MygdlCp1fj5z+sI2Hwao5fuQeiXkyHlsnmiZo+rpYhI77z62XFIjURYPcwVvZ1aQCIWwVJqhKd8rLHIzwHpucXYvP/vevUZePAqrt/Mx7HPn8YLI3wgk0pgITPBS2M74uCnExERm4VfQuIa6RkRUVNickNEeudywm2M8LSClWnNURQ/5xawMZPgq92X6tXnjqMxGN/PA+1daha87OltB//uzvjhj5iHDZmI9AiTGyLSK0plJVRqAU6WJlrbxSIRnORS5Bcp69VvVn4pvJ1b1tru7dIS2Xml9eqTiPQTkxsi0ismJhJIxCIk5GsvKaFSC0jMK4ONVf0m/zq1tkDUjZo7U991KS4bbWwsam0nouaDyQ0R6Z1e7e1wLCEfmVpGZ44lFCC/TIWl03rWq8+Zozvg0LkknI/JqNF2PDIFZ66kY/boDg8dMxHpDyY3RKR3vnt7BASIsPRoMg7H5+F2SQVSCsrx/YVMbArPQDsnOZ4bpr1afW2eH94evX3sMXLJHqzbFYmkDAVupOXjox/OY+K7++HfwxkT+ns20jMioqbENY9EpHe8nav2shkfsB+bwjOxCVV1wIxEQP/Ojvhj7VP17tPURIJDn07Ewg2heO/7MCz79i8AVVXAZ470wbrXB0JixO97RIaAyQ1pcP8a0ifdvexwM+j/cO5qOg6dS4aZqQQvju6A1lZmD92npbkU3y0bjjWv9kf4tUyIREBvH3u0bMHN+4gMCZMbItJrvTs4oHcHhwbts7VchtG93Rq0TyLSHxyDJSIiIoPC5IaIiIgMCpMbIiIiMihMboiIiMigMLkhIiIig8LkhoiIiAwKkxsiIiIyKExuiIiIyKAwuSEiIiKDwuSGHmuFJUrkFJRCrRZ0HQoRETUQll+gx9KBsESs3RWBU9G3AADOthZ4dXxnLJzcDVIT/lkQETVnOh25Wb16NXr16oUWLVrA1tYWEydORGxs7APvd/LkSfTo0QOmpqbw8PDA
N9980wTRkqH4ek8Uxi8PBgD8b+kw/PqfsRje0xX/2XYO49/ZD2WFSscREhHRo9BpcnPy5EnMnTsXZ8+exdGjR1FZWYkRI0aguLi41vskJiZizJgxGDBgAC5evIjly5dj/vz5CAoKasLIqblKyVRg4YaTmD+pK06sfwazR/vi6YFtsWXJMBxcMwEhF2/i2+DLug6TiIgegUgQBL2ZbJCdnQ1bW1ucPHkSAwcO1HrOsmXLEBwcjJiYGM2xV199FVFRUQgLC3vgYygUCsjlcvwv9CrMLFo0WOz67NmCdboOQW+s+D4MX+6+hNRfX4KFzKRG+7QPDuFyQg7+3vqCDqKjx8Hvm4/oOgSiZqmkQoVpv8WhoKAAlpaW9z1XryYUFxQUAABatWpV6zlhYWEYMWJEtWMjR45EREQEKioqapxfXl4OhUJR7UaPrytJt9Gng4PWxAYAhvVwRkxyLlQqdRNHRkREDUVvkhtBELBo0SL0798fHTt2rPW8jIwM2NnZVTtmZ2eHyspK5OTk1Dh/9erVkMvlmpuzs3ODx07Nh7mpMXLyS2ttz84vhamJEcRiURNGRUREDUlvkps33ngD0dHR2Llz5wPPFYmqf/DcvbJ273EACAgIQEFBgeaWmpraMAFTszShvycuxGUh/FpGjTZlhQpbD1/FxP6eWn+XiIioedCL5GbevHkIDg7GiRMn4OTkdN9z7e3tkZFR/YMpKysLEokE1tbWNc6XSqWwtLSsdqPH1/h+HujkYY3JKw4gNCpNkxjfzC7E1A8OITmzEG8920PHURIR0aPQ6YYegiBg3rx52LNnD0JCQuDu7v7A+/j5+WH//v3Vjv3xxx/o2bMnjI2NGytUMhASIzEOfDIRE94JxpA3f4OnoxxycymibmTDXGaMX1aOQXcvW12HSUREj0Cnyc3cuXPx008/Yd++fWjRooVmREYul0MmkwGouqyUlpaG7du3A6haGbVhwwYsWrQIc+bMQVhYGL777rs6Xc4iAoA2NhY4/800HItMwe9hiShXqvDiWF88P7w9Wphpn2hMRETNh06Tm02bNgEABg8eXO14YGAgZs2aBQBIT09HSkqKps3d3R0HDx7EwoUL8fXXX8PR0RFffvklJk2a1FRh6w0u8X54YrEII3q5YkQvV12HQkREDUznl6UeZOvWrTWODRo0CBcuXGiEiIiIiKi504sJxUREREQNhckNERERGRQmN0RERGRQmNwQERGRQWFyQ0RERAaFyQ0REREZFCY3REREZFCY3BAREZFBYXJDREREBoXJDRERERkUJjdERERkUJjcEBERkUFhckNEREQGRadVwUm7ZwvW6ToEIiKiZosjN0RERGRQmNwQERGRQWFyQ0RERAaFyQ0REREZFCY3REREZFCY3BAREZFB4VLwJsQl3kRERI2PIzdERERkUJjcEBERkUFhckNEREQGhckNERERGRQmN0RERGRQmNwQERGRQWFyQ0RERAaFyQ0REREZFCY3REREZFCY3BAREZFBYXJDREREBoXJDRERERkUJjdERERkUJjcEBERkUGR6DoAQ/BswTpdh0BEzcSTL4+s03m/bz7SyJEQGS6O3BAREZFBYXJDREREBoXJDRERERkUJjdERERkUJjcEBERkUFhckNEREQGRadLwUNDQ7F27VpERkYiPT0de/bswcSJE2s9PyQkBEOGDKlxPCYmBu3bt2/w+LjEm4iIqPnRaXJTXFyMLl26YPbs2Zg0aVKd7xcbGwtLS0vNzzY2No0RHhERETVDOk1uRo8ejdGjR9f7fra2trCysmr4gIiIiKjZa5Zzbrp16wYHBwf4+/vjxIkT9z23vLwcCoWi2o2IiIgMV7NKbhwcHLB582YEBQVh9+7d8Pb2hr+/P0JDQ2u9z+rVqyGXyzU3Z2fnJoyYiIiImlqzqi3l7e0Nb29vzc9+fn5ITU3FunXrMHDgQK33CQgIwKJFizQ/KxQKJjhEREQGrFmN3GjTp08fxMXF
1doulUphaWlZ7UZERESGq1mN3Ghz8eJFODg41Pl8QRAAAKXFRQ88V1Fc/tBxERE9ipIKla5DINIrd/8m7n6O349Ok5uioiLEx8drfk5MTMSlS5fQqlUruLi4ICAgAGlpadi+fTsAYP369XBzc4Ovry+USiV27NiBoKAgBAUF1fkxCwsLAQDzRj/xwHP/r57Ph4iIiBpXYWEh5HL5fc/RaXITERFRbVO+u3NjZs6cia1btyI9PR0pKSmadqVSicWLFyMtLQ0ymQy+vr44cOAAxowZU+fHdHR0RGpqKlq0aAGRSNRwT+Yed+f2pKam8lKYHuP71DzwfWoe+D7pv+b8HgmCgMLCQjg6Oj7wXJFQl/EdqjeFQgG5XI6CgoJm9wv0OOH71DzwfWoe+D7pv8flPWr2E4qJiIiI/o3JDRERERkUJjeNRCqVYsWKFZBKpboOhe6D71PzwPepeeD7pP8el/eIc26IiIjIoHDkhoiIiAwKkxsiIiIyKExuiIiIyKAwuSEiIiKDwuSmka1evRoikQhvvvmmrkOhf1m5ciVEIlG1m729va7DonukpaXh+eefh7W1NczMzNC1a1dERkbqOiz6Fzc3txp/SyKRCHPnztV1aPQvlZWVePfdd+Hu7g6ZTAYPDw988MEHUKvVug6tUTT7wpn6LDw8HJs3b0bnzp11HQpp4evri2PHjml+NjIy0mE0dK+8vDz069cPQ4YMwaFDh2Bra4sbN27AyspK16HRv4SHh0Ol+qfI599//43hw4dj8uTJOoyK7rVmzRp888032LZtG3x9fREREYHZs2dDLpdjwYIFug6vwTG5aSRFRUWYPn06tmzZglWrVuk6HNJCIpFwtEaPrVmzBs7OzggMDNQcc3Nz011ApJWNjU21nz/55BN4enpi0KBBOoqItAkLC8OECRMwduxYAFV/Szt37kRERISOI2scvCzVSObOnYuxY8di2LBhug6FahEXFwdHR0e4u7tj6tSpSEhI0HVI9C/BwcHo2bMnJk+eDFtbW3Tr1g1btmzRdVh0H0qlEjt27MCLL77YqIWJqf769++P48eP4/r16wCAqKgo/PXXX/UqPN2ccOSmEezatQsXLlxAeHi4rkOhWvTu3Rvbt2+Hl5cXMjMzsWrVKvTt2xdXrlyBtbW1rsMjAAkJCdi0aRMWLVqE5cuX4/z585g/fz6kUilmzJih6/BIi7179yI/Px+zZs3SdSh0j2XLlqGgoADt27eHkZERVCoVPvroI0ybNk3XoTUKJjcNLDU1FQsWLMAff/wBU1NTXYdDtRg9erTm3506dYKfnx88PT2xbds2LFq0SIeR0V1qtRo9e/bExx9/DADo1q0brly5gk2bNjG50VPfffcdRo8eDUdHR12HQvf4+eefsWPHDvz000/w9fXFpUuX8Oabb8LR0REzZ87UdXgNjslNA4uMjERWVhZ69OihOaZSqRAaGooNGzagvLycE1f1kLm5OTp16oS4uDhdh0J3ODg4oEOHDtWO+fj4ICgoSEcR0f0kJyfj2LFj2L17t65DIS2WLFmCt99+G1OnTgVQ9aUuOTkZq1evZnJDD+bv74/Lly9XOzZ79my0b98ey5YtY2Kjp8rLyxETE4MBAwboOhS6o1+/foiNja127Pr163B1ddVRRHQ/gYGBsLW11UxYJf1SUlICsbj6NFsjIyMuBae6adGiBTp27FjtmLm5OaytrWscJ91ZvHgxxo0bBxcXF2RlZWHVqlVQKBQG+Q2muVq4cCH69u2Ljz/+GFOmTMH58+exefNmbN68Wdeh0T3UajUCAwMxc+ZMSCT8WNFH48aNw0cffQQXFxf4+vri4sWL+Pzzz/Hiiy/qOrRGwd9CeizdvHkT06ZNQ05ODmxsbNCnTx+cPXuWowJ6pFevXtizZw8CAgLwwQcfwN3dHevXr8f06dN1HRrd49ixY0hJSTHYD0pD8NVXX+G9997D66+/jqysLDg6OuKVV17B+++/r+vQGoVI
EARB10EQERERNRTuc0NEREQGhckNERERGRQmN0RERGRQmNwQERGRQWFyQ0RERAaFyQ0REREZFCY3REREZFCY3BAREZFBYXJDRM2OSCTC3r17a20fPHgw3nzzzSaL535CQkIgEomQn5+v61CIHhtMboioTrKysvDKK6/AxcUFUqkU9vb2GDlyJMLCwnQdmt7Qp6SK6HHG2lJEVCeTJk1CRUUFtm3bBg8PD2RmZuL48ePIzc3VdWhERNVw5IaIHig/Px9//fUX1qxZgyFDhsDV1RVPPPEEAgICMHbsWM15BQUFePnll2FrawtLS0sMHToUUVFRmvaVK1eia9eu+Pbbb+Hs7AwzMzNMnjy52iWb8PBwDB8+HK1bt4ZcLsegQYNw4cKFR4pfqVRi6dKlaNOmDczNzdG7d2+EhIRo2rdu3QorKyscOXIEPj4+sLCwwKhRo5Cenq45p7KyEvPnz4eVlRWsra2xbNkyzJw5ExMnTgQAzJo1CydPnsQXX3wBkUgEkUiEpKQkzf0jIyPRs2dPmJmZoW/fvoiNjX2k50REtWNyQ0QPZGFhAQsLC+zduxfl5eVazxEEAWPHjkVGRgYOHjyIyMhIdO/eHf7+/tVGd+Lj4/HLL79g//79OHz4MC5duoS5c+dq2gsLCzFz5kycOnUKZ8+eRbt27TBmzBgUFhY+dPyzZ8/G6dOnsWvXLkRHR2Py5MkYNWoU4uLiNOeUlJRg3bp1+OGHHxAaGoqUlBQsXrxY075mzRr8+OOPCAwMxOnTp6FQKKrN+/niiy/g5+eHOXPmID09Henp6XB2dta0v/POO/jss88QEREBiUTCCtpEjUkgIqqD3377TWjZsqVgamoq9O3bVwgICBCioqI07cePHxcsLS2FsrKyavfz9PQUvv32W0EQBGHFihWCkZGRkJqaqmk/dOiQIBaLhfT0dK2PW1lZKbRo0ULYv3+/5hgAYc+ePbXGOmjQIGHBggWCIAhCfHy8IBKJhLS0tGrn+Pv7CwEBAYIgCEJgYKAAQIiPj9e0f/3114KdnZ3mZzs7O2Ht2rXV4nJxcREmTJig9XHvOnHihABAOHbsmObYgQMHBABCaWlprc+BiB4eR26IqE4mTZqEW7duITg4GCNHjkRISAi6d++OrVu3Aqi67FJUVARra2vNSI+FhQUSExNx48YNTT8uLi5wcnLS/Ozn5we1Wq25TJOVlYVXX30VXl5ekMvlkMvlKCoqQkpKykPFfeHCBQiCAC8vr2pxnTx5slpcZmZm8PT01Pzs4OCArKwsAFWX2zIzM/HEE09o2o2MjNCjR486x9G5c+dqfd99rkTU8DihmIjqzNTUFMOHD8fw4cPx/vvv4//+7/+wYsUKzJo1C2q1Gg4ODtXmstxlZWVVa58ikajaf2fNmoXs7GysX78erq6ukEql8PPzg1KpfKiY1Wo1jIyMEBkZCSMjo2ptFhYWmn8bGxvXiEsQBK2x3nVv+/38u/+7/ajV6jrfn4jqjskNET20Dh06aOaddO/eHRkZGZBIJHBzc6v1PikpKbh16xYcHR0BAGFhYRCLxfDy8gIAnDp1Chs3bsSYMWMAAKmpqcjJyXnoGLt16waVSoWsrCwMGDDgofqQy+Wws7PD+fPnNX2oVCpcvHgRXbt21ZxnYmIClUr10LESUcPgZSkieqDbt29j6NCh2LFjB6Kjo5GYmIhff/0Vn376KSZMmAAAGDZsGPz8/DBx4kQcOXIESUlJOHPmDN59911ERERo+jI1NcXMmTMRFRWFU6dOYf78+ZgyZQrs7e0BAG3btsUPP/yAmJgYnDt3DtOnT4dMJnvo2L28vDB9+nTMmDEDu3fvRmJiIsLDw7FmzRocPHiwzv3MmzcPq1evxr59+xAbG4sFCxYgLy+v2miOm5sbzp07h6SkJOTk5HBkhkhHmNwQ0QNZWFigd+/e+O9//4uBAweiY8eOeO+99zBnzhxs2LABQNWlloMHD2LgwIF48cUX4eXlhalT
pyIpKQl2dnaavtq2bYunn34aY8aMwYgRI9CxY0ds3LhR0/79998jLy8P3bp1wwsvvID58+fD1tb2keIPDAzEjBkz8NZbb8Hb2xvjx4/HuXPnqq1mepBly5Zh2rRpmDFjBvz8/GBhYYGRI0fC1NRUc87ixYthZGSEDh06wMbG5qHnCRHRoxEJ9bloTET0CFauXIm9e/fi0qVLug7lkanVavj4+GDKlCn48MMPdR0OEf0L59wQEdVBcnIy/vjjDwwaNAjl5eXYsGEDEhMT8dxzz+k6NCK6By9LERHVgVgsxtatW9GrVy/069cPly9fxrFjx+Dj46Pr0IjoHrwsRURERAaFIzdERERkUJjcEBERkUFhckNEREQGhckNERERGRQmN0RERGRQmNwQERGRQWFyQ0RERAaFyQ0REREZlP8Hd0Z0/lPgCw8AAAAASUVORK5CYII=",
+ "text/plain": [
+ "