diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 98f39e7be..000000000 --- a/.dockerignore +++ /dev/null @@ -1,4 +0,0 @@ -data -results -*.bvecs -venv diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index cdd86dd47..1302e3311 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -1,61 +1,61 @@ -# Contributed by @GuilhemN in https://github.com/erikbern/ann-benchmarks/pull/233 -name: Billion-Scale ANN Benchmarks - -on: [push, pull_request] - -jobs: - build: - runs-on: ubuntu-latest - strategy: - matrix: - include: - - algorithm: faiss-ivf - library: faissconda - dataset: random-xs - - algorithm: faiss-t1 - dataset: random-xs - library: faissconda - - algorithm: faiss-t1 - dataset: random-range-xs - library: faissconda - - algorithm: diskann-t2 - dataset: random-xs - library: diskann - - algorithm: diskann-t2 - dataset: random-range-xs - library: diskann - - algorithm: httpann_example - dataset: random-xs - library: httpann_example - - algorithm: httpann_example - dataset: random-range-xs - library: httpann_example - fail-fast: false - - steps: - - uses: actions/checkout@v2 # Pull the repository - - - name: Set up Python 3.6 - uses: actions/setup-python@v2 - with: - python-version: 3.6 - - - name: Install dependencies - run: | - pip install -r requirements.txt - python install.py - env: - LIBRARY: ${{ matrix.library }} - DATASET: ${{ matrix.dataset }} - - - name: Run the benchmark - run: | - python create_dataset.py --dataset $DATASET - python run.py --algorithm $ALGORITHM --max-n-algorithms 2 --dataset $DATASET --timeout 600 - sudo chmod -R 777 results/ - python plot.py --dataset $DATASET --output plot.png - python data_export.py --output test.csv - - env: - ALGORITHM: ${{ matrix.algorithm}} - DATASET: ${{ matrix.dataset }} +# Contributed by @GuilhemN in https://github.com/erikbern/ann-benchmarks/pull/233 +name: Billion-Scale ANN Benchmarks + +on: [push, pull_request] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - algorithm: faiss-ivf + library: faissconda + dataset: random-xs + - algorithm: faiss-t1 + dataset: random-xs + library: faissconda + - algorithm: faiss-t1 + dataset: random-range-xs + library: faissconda + - algorithm: diskann-t2 + dataset: random-xs + library: diskann + - algorithm: diskann-t2 + dataset: random-range-xs + library: diskann + - algorithm: httpann_example + dataset: random-xs + library: httpann_example + - algorithm: httpann_example + dataset: random-range-xs + library: httpann_example + fail-fast: false + + steps: + - uses: actions/checkout@v2 # Pull the repository + + - name: Set up Python 3.6 + uses: actions/setup-python@v2 + with: + python-version: 3.6 + + - name: Install dependencies + run: | + pip install -r requirements.txt + python install.py + env: + LIBRARY: ${{ matrix.library }} + DATASET: ${{ matrix.dataset }} + + - name: Run the benchmark + run: | + python create_dataset.py --dataset $DATASET + python run.py --algorithm $ALGORITHM --max-n-algorithms 2 --dataset $DATASET --timeout 600 + sudo chmod -R 777 results/ + python plot.py --dataset $DATASET --output plot.png + python data_export.py --output test.csv + + env: + ALGORITHM: ${{ matrix.algorithm}} + DATASET: ${{ matrix.dataset }} diff --git a/GitHub_Logo_White.png b/GitHub_Logo_White.png new file mode 100644 index 000000000..c61ab9d05 Binary files /dev/null and b/GitHub_Logo_White.png differ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index ef714baaa..000000000 --- 
a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Martin Aumüller - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/MSFT-Turing-ANNS-terms.txt b/MSFT-Turing-ANNS-terms.txt deleted file mode 100644 index e413bfd7b..000000000 --- a/MSFT-Turing-ANNS-terms.txt +++ /dev/null @@ -1,21 +0,0 @@ -Release Terms for the Microsoft-Turing-ANNS dataset ---------------------------------------------------- - -The MS-Turing-ANNS datasets are intended for non-commercial research -purposes only to promote advancement in the field of artificial -intelligence and related areas, and is made available free of charge -without extending any license or other intellectual property -rights. The dataset is provided “as is” without warranty and usage of -the data has risks since we may not own the underlying rights in the -documents. We are not be liable for any damages related to use of the -dataset. Feedback is voluntarily given and can be used as we see -fit. Upon violation of any of these terms, your rights to use the -dataset will end automatically. - - -Disclaimer ----------- - -Data sets are intended for research and academic purposes. Please note -that inputs or outputs may include content that users find offensive, -shocking, and/or otherwise harmful. diff --git a/README.md b/README.md index 89754454b..e78c3ea52 100644 --- a/README.md +++ b/README.md @@ -1,77 +1,77 @@ -# Billion-Scale ANN - - - -## Install - -The only prerequisite is Python (tested with 3.6) and Docker. Works with newer versions of Python as well but probably requires an updated `requirements.txt` on the host. (Suggestion: copy `requirements.txt` to `requirements${PYTHON_VERSION}.txt` and remove all fixed versions. `requirements.txt` has to be kept for the docker containers.) - -1. Clone the repo. -2. Run `pip install -r requirements.txt` (Use `requirements_py38.txt` if you have Python 3.8.) -3. Install docker by following instructions [here](https://docs.docker.com/engine/install/ubuntu/). -You might also want to follow the post-install steps for running docker in non-root user mode. -3. Run `python install.py` to build all the libraries inside Docker containers. - -## Storing Data - -The framework assumes that all data is stored in `data/`. -Please use a symlink if your datasets and indices are supposed to be stored somewhere else. -The location of the linked folder matters a great deal for SSD-based search performance in T2. -A local SSD such as the one found on Azure Ls-series VMs is better than remote disks, even premium ones. 
-See [T1/T2](t1_t2/README.md) for more details. - -## Data sets - -See for details on the different datasets. - -### Dataset Preparation - -Before running experiments, datasets have to be downloaded. All preparation can be carried out by calling - -```python -python create_dataset.py --dataset [bigann-1B | deep-1B | text2image-1B | ssnpp-1B | msturing-1B | msspacev-1B] -``` - -Note that downloading the datasets can potentially take many hours. - -For local testing, there exist smaller random datasets `random-xs` and `random-range-xs`. -Furthermore, most datasets have 1M, 10M and 100M versions, run `python create_dataset -h` to get an overview. - - -## Running the benchmark - -Run `python run.py --dataset $DS --algorithm $ALGO` where `DS` is the dataset you are running on, -and `ALGO` is the name of the algorithm. (Use `python run.py --list-algorithms`) to get an overview. -`python run.py -h` provides you with further options. - -The parameters used by the implementation to build and query the index can be found in `algos.yaml`. - -## Running the track 1 baseline -After running the installation, we can evaluate the baseline as follows. - -```bash - -for DS in bigann-1B deep-1B text2image-1B ssnpp-1B msturing-1B msspacev-1B; -do - python run.py --dataset $DS --algorithm faiss-t1; -done -``` - -On a 28-core Xeon E5-2690 v4 that provided 100MB/s downloads, carrying out the baseline experiments took roughly 7 days. - -To evaluate the results, run -```bash -sudo chmod -R 777 results/ -python data_export.py --output res.csv -python3.8 eval/show_operating_points.py --algorithm faiss-t1 --threshold 10000 -``` - -## Including your algorithm and Evaluating the Results - -See [Track T1/T2](t1_t2/README.md) for more details on evaluation for Tracks T1 and T2. - -See [Track T3](t3/README.md) for more details on evaluation for Track T3. - -# Credits - -This project is a version of [ann-benchmarks](https://github.com/erikbern/ann-benchmarks) by [Erik Bernhardsson](https://erikbern.com/) and contributors targetting billion-scale datasets. +# Billion-Scale ANN + + + +## Install + +The only prerequisite is Python (tested with 3.6) and Docker. Works with newer versions of Python as well but probably requires an updated `requirements.txt` on the host. (Suggestion: copy `requirements.txt` to `requirements${PYTHON_VERSION}.txt` and remove all fixed versions. `requirements.txt` has to be kept for the docker containers.) + +1. Clone the repo. +2. Run `pip install -r requirements.txt` (Use `requirements_py38.txt` if you have Python 3.8.) +3. Install docker by following instructions [here](https://docs.docker.com/engine/install/ubuntu/). +You might also want to follow the post-install steps for running docker in non-root user mode. +3. Run `python install.py` to build all the libraries inside Docker containers. + +## Storing Data + +The framework assumes that all data is stored in `data/`. +Please use a symlink if your datasets and indices are supposed to be stored somewhere else. +The location of the linked folder matters a great deal for SSD-based search performance in T2. +A local SSD such as the one found on Azure Ls-series VMs is better than remote disks, even premium ones. +See [T1/T2](t1_t2/README.md) for more details. + +## Data sets + +See for details on the different datasets. + +### Dataset Preparation + +Before running experiments, datasets have to be downloaded. 
All preparation can be carried out by calling + +```python +python create_dataset.py --dataset [bigann-1B | deep-1B | text2image-1B | ssnpp-1B | msturing-1B | msspacev-1B] +``` + +Note that downloading the datasets can potentially take many hours. + +For local testing, there exist smaller random datasets `random-xs` and `random-range-xs`. +Furthermore, most datasets have 1M, 10M and 100M versions, run `python create_dataset -h` to get an overview. + + +## Running the benchmark + +Run `python run.py --dataset $DS --algorithm $ALGO` where `DS` is the dataset you are running on, +and `ALGO` is the name of the algorithm. (Use `python run.py --list-algorithms`) to get an overview. +`python run.py -h` provides you with further options. + +The parameters used by the implementation to build and query the index can be found in `algos.yaml`. + +## Running the track 1 baseline +After running the installation, we can evaluate the baseline as follows. + +```bash + +for DS in bigann-1B deep-1B text2image-1B ssnpp-1B msturing-1B msspacev-1B; +do + python run.py --dataset $DS --algorithm faiss-t1; +done +``` + +On a 28-core Xeon E5-2690 v4 that provided 100MB/s downloads, carrying out the baseline experiments took roughly 7 days. + +To evaluate the results, run +```bash +sudo chmod -R 777 results/ +python data_export.py --output res.csv +python3.8 eval/show_operating_points.py --algorithm faiss-t1 --threshold 10000 +``` + +## Including your algorithm and Evaluating the Results + +See [Track T1/T2](t1_t2/README.md) for more details on evaluation for Tracks T1 and T2. + +See [Track T3](t3/README.md) for more details on evaluation for Track T3. + +# Credits + +This project is a version of [ann-benchmarks](https://github.com/erikbern/ann-benchmarks) by [Erik Bernhardsson](https://erikbern.com/) and contributors targetting billion-scale datasets. diff --git a/algos.yaml b/algos.yaml deleted file mode 100644 index 47126a73f..000000000 --- a/algos.yaml +++ /dev/null @@ -1,594 +0,0 @@ -any: - faiss-ivf: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_inmem - constructor: FaissIVF - base-args: ["@metric"] - run-groups: - base: - args: [[1024,2048,4096,8192]] - query-args: [[1, 5, 10, 50, 100, 200]] -random-range-xs: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - # the following args show all build arguments that can be provided to the t1 baseline. 
- args: | - [{"indexkey": "OPQ32_128,IVF1024_HNSW32,PQ32", - "buildthreads": -1, - "by_residual": -1, - "add_bs": 100000000, - "two_level_clustering": true, - "quantizer_add_efSearch": 80, - "quantizer_efConstruction": 200, - "maxtrain": 100000000, - "clustering_niter": 25 - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=8", - "nprobe=4,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=16"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":32, "L":32, "B":0.0001, "M":1}] - query-args: | - [{"Lmin":3, "Lmax":100, "BW":4, "T":16}] - diskann-v0_3-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-v0_3 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":32, "L":32, "B":0.0001, "M":1}] - query-args: | - [{"Lmin":3, "Lmax":100, "BW":4, "T":16}] - httpann_example: - docker-tag: billion-scale-benchmark-httpann_example - module: benchmark.algorithms.httpann_example - constructor: HttpANNExampleAlgorithm - base-args: [ "@metric", "@dimension" ] - run-groups: - base: - args: - - [ 0.2, 0.4, 1.0 ] - query-args: [] -random-xs: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF1024_HNSW32,PQ32" - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=8", - "nprobe=4,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=16"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":32, "L":32, "B":0.0001, "M":1}] - query-args: | - [{"Ls":10, "BW":4, "T":16}] - diskann-v0_3-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-v0_3 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":32, "L":32, "B":0.0001, "M":1}] - query-args: | - [{"Ls":10, "BW":4, "T":16}] - httpann_example: - docker-tag: billion-scale-benchmark-httpann_example - module: benchmark.algorithms.httpann_example - constructor: HttpANNExampleAlgorithm - base-args: [ "@metric", "@dimension" ] - run-groups: - base: - args: - - [ 0.2, 0.8, 1.0 ] - query-args: [ ] -deep-10M: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF65536_HNSW32,PQ32" - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=8", - "nprobe=4,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=16"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":0.3, "M":15}] - query-args: | - [{"Ls":50, "BW":4, "T":16}] -deep-1B: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ64_128,IVF1048576_HNSW32,PQ64x4fsr", - "url": 
"https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/deep-1B.IVF1M_2level_PQ64x4fsr.faissindex" - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=8", - "nprobe=4,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=16", - "nprobe=4,quantizer_efSearch=8", - "nprobe=4,quantizer_efSearch=16", - "nprobe=8,quantizer_efSearch=4", - "nprobe=8,quantizer_efSearch=8", - "nprobe=8,quantizer_efSearch=16", - "nprobe=8,quantizer_efSearch=32", - "nprobe=8,quantizer_efSearch=64", - "nprobe=16,quantizer_efSearch=8", - "nprobe=16,quantizer_efSearch=16", - "nprobe=16,quantizer_efSearch=32", - "nprobe=16,quantizer_efSearch=64", - "nprobe=16,quantizer_efSearch=128", - "nprobe=32,quantizer_efSearch=16", - "nprobe=32,quantizer_efSearch=32", - "nprobe=32,quantizer_efSearch=64", - "nprobe=32,quantizer_efSearch=128", - "nprobe=32,quantizer_efSearch=256", - "nprobe=64,quantizer_efSearch=16", - "nprobe=64,quantizer_efSearch=32", - "nprobe=64,quantizer_efSearch=64", - "nprobe=64,quantizer_efSearch=128", - "nprobe=64,quantizer_efSearch=256", - "nprobe=64,quantizer_efSearch=512", - "nprobe=128,quantizer_efSearch=32", - "nprobe=128,quantizer_efSearch=64", - "nprobe=128,quantizer_efSearch=128", - "nprobe=128,quantizer_efSearch=256", - "nprobe=128,quantizer_efSearch=512", - "nprobe=256,quantizer_efSearch=64", - "nprobe=256,quantizer_efSearch=128"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":50, "M":110, - "url": "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices/deep-1B/R100_L100_B50_M110" - }] - query-args: | - [{"Ls":30, "BW":4, "T":16}, - {"Ls":40, "BW":4, "T":16}, - {"Ls":50, "BW":4, "T":16}, - {"Ls":53, "BW":4, "T":16}, - {"Ls":56, "BW":4, "T":16}, - {"Ls":58, "BW":4, "T":16}, - {"Ls":60, "BW":4, "T":16}, - {"Ls":70, "BW":4, "T":16}, - {"Ls":80, "BW":4, "T":16}, - {"Ls":100, "BW":4, "T":16}] -msspacev-1B: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ64_128,IVF1048576_HNSW32,PQ64x4fsr", - "url": "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/msspacev-1B.IVF1M_2level_PQ64x4fsr.faissindex", - "query_bs": 4096 - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=1,quantizer_efSearch=8", - "nprobe=1,quantizer_efSearch=16", - "nprobe=2,quantizer_efSearch=8", - "nprobe=2,quantizer_efSearch=16", - "nprobe=4,quantizer_efSearch=16", - "nprobe=4,quantizer_efSearch=32", - "nprobe=4,quantizer_efSearch=64", - "nprobe=8,quantizer_efSearch=32", - "nprobe=8,quantizer_efSearch=64", - "nprobe=16,quantizer_efSearch=32", - "nprobe=8,quantizer_efSearch=128", - "nprobe=16,quantizer_efSearch=64", - "nprobe=16,quantizer_efSearch=128", - "nprobe=32,quantizer_efSearch=64", - "nprobe=32,quantizer_efSearch=128", - "nprobe=32,quantizer_efSearch=256", - "nprobe=64,quantizer_efSearch=128", - "nprobe=64,quantizer_efSearch=256", - "nprobe=64,quantizer_efSearch=512", - "nprobe=128,quantizer_efSearch=128", - "nprobe=128,quantizer_efSearch=256", - "nprobe=128,quantizer_efSearch=512", - "nprobe=256,quantizer_efSearch=256", - "nprobe=256,quantizer_efSearch=512"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: 
benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":47, "M":100, - "url": "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices/msspacev-1B/R100_L100_B47_M100" - }] - query-args: | - [{"Ls":40, "BW":4, "T":16}, - {"Ls":50, "BW":4, "T":16}, - {"Ls":60, "BW":4, "T":16}, - {"Ls":70, "BW":4, "T":16}, - {"Ls":80, "BW":4, "T":16}, - {"Ls":90, "BW":4, "T":16}, - {"Ls":100, "BW":4, "T":16}, - {"Ls":110, "BW":4, "T":16}, - {"Ls":120, "BW":4, "T":16}, - {"Ls":130, "BW":4, "T":16}] -msturing-1B: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ64_128,IVF1048576_HNSW32,PQ64x4fsr", - "url": "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/msturing-1B.IVF1M_2level_PQ64x4fsr.faissindex", - "query_bs": 4096 - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=4", - "nprobe=1,quantizer_efSearch=8", - "nprobe=2,quantizer_efSearch=8", - "nprobe=1,quantizer_efSearch=16", - "nprobe=2,quantizer_efSearch=16", - "nprobe=4,quantizer_efSearch=16", - "nprobe=2,quantizer_efSearch=32", - "nprobe=4,quantizer_efSearch=32", - "nprobe=8,quantizer_efSearch=32", - "nprobe=8,quantizer_efSearch=64", - "nprobe=16,quantizer_efSearch=64", - "nprobe=16,quantizer_efSearch=128", - "nprobe=32,quantizer_efSearch=128", - "nprobe=32,quantizer_efSearch=256", - "nprobe=64,quantizer_efSearch=128", - "nprobe=64,quantizer_efSearch=256", - "nprobe=128,quantizer_efSearch=128", - "nprobe=64,quantizer_efSearch=512", - "nprobe=128,quantizer_efSearch=256", - "nprobe=128,quantizer_efSearch=512", - "nprobe=256,quantizer_efSearch=256", - "nprobe=256,quantizer_efSearch=512"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":50, "M":80, - "url": "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices/msturing-1B/R100_L100_B50_M80" - }] - query-args: | - [{"Ls":30, "BW":4, "T":16}, - {"Ls":40, "BW":4, "T":16}, - {"Ls":50, "BW":4, "T":16}, - {"Ls":55, "BW":4, "T":16}, - {"Ls":57, "BW":4, "T":16}, - {"Ls":59, "BW":4, "T":16}, - {"Ls":60, "BW":4, "T":16}, - {"Ls":70, "BW":4, "T":16}, - {"Ls":80, "BW":4, "T":16}, - {"Ls":100, "BW":4, "T":16}] -bigann-1B: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ64_128,IVF1048576_HNSW32,PQ64x4fsr", - "url": "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/bigann-1B.IVF1M_2level_PQ64x4fsr.faissindex" - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4", - "nprobe=2,quantizer_efSearch=4", - "nprobe=4,quantizer_efSearch=4", - "nprobe=4,quantizer_efSearch=8", - "nprobe=8,quantizer_efSearch=4", - "nprobe=8,quantizer_efSearch=8", - "nprobe=8,quantizer_efSearch=16", - "nprobe=8,quantizer_efSearch=32", - "nprobe=16,quantizer_efSearch=16", - "nprobe=16,quantizer_efSearch=32", - "nprobe=16,quantizer_efSearch=64", - "nprobe=32,quantizer_efSearch=8", - "nprobe=32,quantizer_efSearch=32", - "nprobe=32,quantizer_efSearch=64", - "nprobe=32,quantizer_efSearch=128", - "nprobe=64,quantizer_efSearch=16", - 
"nprobe=64,quantizer_efSearch=32", - "nprobe=64,quantizer_efSearch=64", - "nprobe=64,quantizer_efSearch=128", - "nprobe=64,quantizer_efSearch=256", - "nprobe=128,quantizer_efSearch=32", - "nprobe=128,quantizer_efSearch=64", - "nprobe=128,quantizer_efSearch=128", - "nprobe=128,quantizer_efSearch=256", - "nprobe=128,quantizer_efSearch=512", - "nprobe=256,quantizer_efSearch=64", - "nprobe=256,quantizer_efSearch=128", - "nprobe=256,quantizer_efSearch=512"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":50, "M":80, - "url": "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices/bigann-1B/R100_L100_B50_M80" - }] - query-args: | - [{"Ls":30, "BW":4, "T":16}, - {"Ls":40, "BW":4, "T":16}, - {"Ls":50, "BW":4, "T":16}, - {"Ls":55, "BW":4, "T":16}, - {"Ls":60, "BW":4, "T":16}, - {"Ls":62, "BW":4, "T":16}, - {"Ls":65, "BW":4, "T":16}, - {"Ls":70, "BW":4, "T":16}, - {"Ls":80, "BW":4, "T":16}, - {"Ls":100, "BW":4, "T":16}] -ssnpp-1B: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF1048576_HNSW32,PQ32", - "url": "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/ssnpp-1B.IVF1M_2level_PQ32.faissindex" - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4,ht=92", - "nprobe=1,quantizer_efSearch=4,ht=98", - "nprobe=1,quantizer_efSearch=4,ht=104", - "nprobe=1,quantizer_efSearch=4,ht=112", - "nprobe=1,quantizer_efSearch=8,ht=96", - "nprobe=1,quantizer_efSearch=8,ht=108", - "nprobe=1,quantizer_efSearch=16,ht=98", - "nprobe=1,quantizer_efSearch=16,ht=114", - "nprobe=1,quantizer_efSearch=16,ht=116", - "nprobe=1,quantizer_efSearch=32,ht=98", - "nprobe=1,quantizer_efSearch=32,ht=110", - "nprobe=1,quantizer_efSearch=32,ht=112", - "nprobe=1,quantizer_efSearch=32,ht=120", - "nprobe=4,quantizer_efSearch=16,ht=256", - "nprobe=4,quantizer_efSearch=32,ht=104", - "nprobe=4,quantizer_efSearch=32,ht=112", - "nprobe=4,quantizer_efSearch=32,ht=256", - "nprobe=8,quantizer_efSearch=32,ht=112", - "nprobe=4,quantizer_efSearch=64,ht=256", - "nprobe=8,quantizer_efSearch=64,ht=116", - "nprobe=8,quantizer_efSearch=64,ht=128", - "nprobe=16,quantizer_efSearch=32,ht=256", - "nprobe=16,quantizer_efSearch=64,ht=118", - "nprobe=32,quantizer_efSearch=64,ht=256", - "nprobe=64,quantizer_efSearch=256,ht=116", - "nprobe=32,quantizer_efSearch=512,ht=256", - "nprobe=64,quantizer_efSearch=512,ht=126", - "nprobe=256,quantizer_efSearch=256,ht=128"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":60, "M":100, "C":500000, "CM":2, - "url": "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices/fbssnpp-1B/R100_L100_B60_M100" - }] - query-args: | - [{"Lmin":8, "Lmax":10000, "BW":4, "T":16}, - {"Lmin":12, "Lmax":10000, "BW":4, "T":16}, - {"Lmin":8, "Lmax":20000, "BW":4, "T":16}, - {"Lmin":12, "Lmax":20000, "BW":4, "T":16}, - {"Lmin":8, "Lmax":30000, "BW":4, "T":16}, - {"Lmin":12, "Lmax":30000, "BW":4, "T":16}, - {"Lmin":16, "Lmax":30000, "BW":4, "T":16}, - {"Lmin":8, "Lmax":45000, "BW":4, "T":16}, - {"Lmin":12, "Lmax":45000, "BW":4, "T":16}, - {"Lmin":16, "Lmax":45000, 
"BW":4, "T":16}] -text2image-1B: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF1048576_HNSW32,PQ32", - "url": "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/text2image-1B.IVF1M_2level_PQ32.faissindex" - }] - # Trying all combinations of nprobe x efSearch - query-args: | - ["nprobe=1,quantizer_efSearch=4,ht=106", - "nprobe=1,quantizer_efSearch=16,ht=114", - "nprobe=1,quantizer_efSearch=16,ht=116", - "nprobe=2,quantizer_efSearch=16,ht=110", - "nprobe=4,quantizer_efSearch=8,ht=118", - "nprobe=4,quantizer_efSearch=32,ht=256", - "nprobe=8,quantizer_efSearch=16,ht=114", - "nprobe=4,quantizer_efSearch=64,ht=118", - "nprobe=4,quantizer_efSearch=64,ht=122", - "nprobe=8,quantizer_efSearch=32,ht=116", - "nprobe=8,quantizer_efSearch=64,ht=116", - "nprobe=8,quantizer_efSearch=64,ht=126", - "nprobe=16,quantizer_efSearch=64,ht=256", - "nprobe=16,quantizer_efSearch=256,ht=118", - "nprobe=16,quantizer_efSearch=512,ht=256", - "nprobe=32,quantizer_efSearch=512,ht=128", - "nprobe=64,quantizer_efSearch=128,ht=126", - "nprobe=128,quantizer_efSearch=512,ht=256", - "nprobe=256,quantizer_efSearch=512,ht=120", - "nprobe=256,quantizer_efSearch=512,ht=122"] - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":60, "M":115, "PQ":200, "C":500000, "CM":2, - "url": "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices/text2image-1B/R100_L100_B60_M115_PQ200" - }] - query-args: | - [{"Ls":10, "BW":10, "T":16}, - {"Ls":20, "BW":10, "T":16}, - {"Ls":30, "BW":10, "T":16}, - {"Ls":40, "BW":10, "T":16}, - {"Ls":50, "BW":10, "T":16}, - {"Ls":60, "BW":10, "T":16}, - {"Ls":70, "BW":10, "T":16}, - {"Ls":80, "BW":10, "T":16}, - {"Ls":90, "BW":10, "T":16}, - {"Ls":100, "BW":10, "T":16}] -ssnpp-10M: - faiss-t1: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.faiss_t1 - constructor: Faiss - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF65536_HNSW32,PQ32" - }] - query-args: | - ["nprobe=1,quantizer_efSearch=4,ht=92", - "nprobe=1,quantizer_efSearch=4,ht=98", - "nprobe=1,quantizer_efSearch=4,ht=104", - "nprobe=1,quantizer_efSearch=4,ht=112"] -bigann-10M: - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":100, "L":100, "B":0.3, "M":15}] - query-args: | - [{"Ls":50, "BW":4, "T":16}] - -msturing-1M: - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":50, "L":50, "B":0.03, "M":1}] - query-args: | - [{"Ls":50, "BW":4, "T":16}] -msspacev-1M: - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":50, "L":50, "B":0.03, "M":1}] - query-args: | - [{"Ls":50, "BW":4, "T":16}] -text2image-1M: - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":50, "L":50, 
"B":0.03, "M":1, "PQ":200}] - query-args: | - [{"Ls":50, "BW":4, "T":16}] -text2image-10M: - diskann-t2: - docker-tag: billion-scale-benchmark-diskann - module: benchmark.algorithms.diskann-t2 - constructor: Diskann - base-args: ["@metric"] - run-groups: - base: - args: | - [{"R":50, "L":50, "B":0.3, "M":10, "PQ":200}] - query-args: | - [{"Ls":50, "BW":4, "T":16}] diff --git a/assets/aws_logo.png b/assets/aws_logo.png new file mode 100644 index 000000000..06df0852f Binary files /dev/null and b/assets/aws_logo.png differ diff --git a/assets/cmt_logo.png b/assets/cmt_logo.png new file mode 100644 index 000000000..4ad6a8404 Binary files /dev/null and b/assets/cmt_logo.png differ diff --git a/assets/discord_logo.png b/assets/discord_logo.png new file mode 100644 index 000000000..254977f0b Binary files /dev/null and b/assets/discord_logo.png differ diff --git a/assets/github_logo.png b/assets/github_logo.png new file mode 100644 index 000000000..7356d06ed Binary files /dev/null and b/assets/github_logo.png differ diff --git a/assets/github_logo_white.png b/assets/github_logo_white.png new file mode 100644 index 000000000..09cb6cc0b Binary files /dev/null and b/assets/github_logo_white.png differ diff --git a/assets/microsoft_logo.png b/assets/microsoft_logo.png new file mode 100644 index 000000000..315a95007 Binary files /dev/null and b/assets/microsoft_logo.png differ diff --git a/assets/og.png b/assets/og.png new file mode 100644 index 000000000..436c4c482 Binary files /dev/null and b/assets/og.png differ diff --git a/assets/organizers/amir-ingber.png b/assets/organizers/amir-ingber.png new file mode 100644 index 000000000..b895c1ff1 Binary files /dev/null and b/assets/organizers/amir-ingber.png differ diff --git a/assets/organizers/dmitry-baranchuk.png b/assets/organizers/dmitry-baranchuk.png new file mode 100644 index 000000000..ae807f3c3 Binary files /dev/null and b/assets/organizers/dmitry-baranchuk.png differ diff --git a/assets/organizers/edo-liberty.png b/assets/organizers/edo-liberty.png new file mode 100644 index 000000000..f2911ee4d Binary files /dev/null and b/assets/organizers/edo-liberty.png differ diff --git a/assets/organizers/frank-liu.png b/assets/organizers/frank-liu.png new file mode 100644 index 000000000..41d8334cb Binary files /dev/null and b/assets/organizers/frank-liu.png differ diff --git a/assets/organizers/george-williams.png b/assets/organizers/george-williams.png new file mode 100644 index 000000000..3de8649f7 Binary files /dev/null and b/assets/organizers/george-williams.png differ diff --git a/assets/organizers/harsha-simhadri.png b/assets/organizers/harsha-simhadri.png new file mode 100644 index 000000000..626f6420f Binary files /dev/null and b/assets/organizers/harsha-simhadri.png differ diff --git "a/assets/organizers/martin-aum\303\274ller.png" "b/assets/organizers/martin-aum\303\274ller.png" new file mode 100644 index 000000000..a1deecbbc Binary files /dev/null and "b/assets/organizers/martin-aum\303\274ller.png" differ diff --git a/assets/organizers/matthijs-douze.png b/assets/organizers/matthijs-douze.png new file mode 100644 index 000000000..c0f6d8161 Binary files /dev/null and b/assets/organizers/matthijs-douze.png differ diff --git a/assets/pinecone-logo.svg b/assets/pinecone-logo.svg new file mode 100644 index 000000000..4f2753237 --- /dev/null +++ b/assets/pinecone-logo.svg @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/assets/zilliz-logo.png b/assets/zilliz-logo.png 
new file mode 100644 index 000000000..8b66a559e Binary files /dev/null and b/assets/zilliz-logo.png differ diff --git a/benchmark/__init__.py b/benchmark/__init__.py deleted file mode 100644 index c3961685a..000000000 --- a/benchmark/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import absolute_import diff --git a/benchmark/algorithms/base.py b/benchmark/algorithms/base.py deleted file mode 100644 index 903903b14..000000000 --- a/benchmark/algorithms/base.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import absolute_import -import psutil - -class BaseANN(object): - def done(self): - """ - This is called after results have been processed. - Use it for cleaning up if necessary. - """ - pass - - def track(self): - """ - return "T1" if submitting an entry for track 1 - return "T2" if submitting an entry for track 2 - return "T3" if submitting an entry for track 3 - """ - raise NotImplementedError() - - def fit(self, dataset): - """ - Build the index for the data points given in dataset name. - Assumes that after fitting index is loaded in memory. - """ - raise NotImplementedError() - - def load_index(self, dataset): - """ - Load the index for dataset. Returns False if index - is not available, True otherwise. - - Checking the index usually involves the dataset name - and the index build paramters passed during construction. - """ - raise NotImplementedError() - - def index_files_to_store(self, dataset): - """ - Specify a triplet with the local directory path of index files, - the common prefix name of index component(s) and a list of - index components that need to be uploaded to (after build) - or downloaded from (for search) cloud storage. - - For local directory path under docker environment, please use - a directory under - data/indices/track(T1 or T2)/algo.__str__()/DATASETS[dataset]().short_name() - """ - raise NotImplementedError() - - def query(self, X, k): - """Carry out a batch query for k-NN of query set X.""" - raise NotImplementedError() - - def range_query(self, X, radius): - """ - Carry out a batch query for range search with - radius. - """ - raise NotImplementedError() - - def get_results(self): - """ - Helper method to convert query results of k-NN search. - If there are nq queries, returns a (nq, k) array of integers - representing the indices of the k-NN for each query. - """ - return self.res - - def get_range_results(self): - """ - Helper method to convert query results of range search. - If there are nq queries, returns a triple lims, D, I. - lims is a (nq) array, such that - - I[lims[q]:lims[q + 1]] in int - - are the indices of the indices of the range results of query q, and - - D[lims[q]:lims[q + 1]] in float - - are the distances. - """ - return self.res - - def get_additional(self): - """ - Retrieve additional results. - Return a dictionary with metrics - and corresponding measured values. - - The following additional metrics are supported: - - `mean_latency` in microseconds, if this applies to your algorithm. - Skip if your algorithm batches query processing. - - `latency_999` is the 99.9pc latency in microseconds, if this applies - to your algorithm. Skip if your algorithm batches query processing. - - `dist_comps` is the total number of points in the base set - to which a query was compared. - - `mean_ssd_ios` is the average number of SSD I/Os per query for T2 algorithms. 
- """ - return {} - - def __str__(self): - return self.name - - def get_memory_usage(self): - """Return the current memory usage of this algorithm instance - (in kilobytes), or None if this information is not available.""" - # return in kB for backwards compatibility - return psutil.Process().memory_info().rss / 1024 diff --git a/benchmark/algorithms/definitions.py b/benchmark/algorithms/definitions.py deleted file mode 100644 index 836282bc7..000000000 --- a/benchmark/algorithms/definitions.py +++ /dev/null @@ -1,179 +0,0 @@ -from __future__ import absolute_import -from os import sep as pathsep -import collections -import importlib -import os -import sys -import traceback -import yaml -import json -from enum import Enum -from itertools import product - - -Definition = collections.namedtuple( - 'Definition', - ['algorithm', 'constructor', 'module', 'docker_tag', - 'arguments', 'query_argument_groups', 'disabled']) - - -def instantiate_algorithm(definition): - print('Trying to instantiate %s.%s(%s)' % - (definition.module, definition.constructor, definition.arguments)) - module = importlib.import_module(definition.module) - constructor = getattr(module, definition.constructor) - return constructor(*definition.arguments) - - -class InstantiationStatus(Enum): - AVAILABLE = 0 - NO_CONSTRUCTOR = 1 - NO_MODULE = 2 - - -def algorithm_status(definition): - try: - module = importlib.import_module(definition.module) - if hasattr(module, definition.constructor): - return InstantiationStatus.AVAILABLE - else: - return InstantiationStatus.NO_CONSTRUCTOR - except ImportError: - return InstantiationStatus.NO_MODULE - - -def _generate_combinations(args): - if isinstance(args, list): - args = [el if isinstance(el, list) else [el] for el in args] - return [list(x) for x in product(*args)] - elif isinstance(args, dict): - flat = [] - for k, v in args.items(): - if isinstance(v, list): - flat.append([(k, el) for el in v]) - else: - flat.append([(k, v)]) - return [dict(x) for x in product(*flat)] - elif isinstance(args, str): - l = json.loads(args.strip()) - return l - else: - raise TypeError("No args handling exists for %s" % type(args).__name__) - - -def _substitute_variables(arg, vs): - if isinstance(arg, dict): - return dict([(k, _substitute_variables(v, vs)) - for k, v in arg.items()]) - elif isinstance(arg, list): - return [_substitute_variables(a, vs) for a in arg] - elif isinstance(arg, str) and arg in vs: - return vs[arg] - else: - return arg - - -def _get_definitions(definition_file): - with open(definition_file, "r") as f: - return yaml.load(f, yaml.SafeLoader) - - -def list_algorithms(definition_file): - definitions = _get_definitions(definition_file) - - print('The following algorithms are supported...') - for point in definitions: - print('\t... for the point type "%s"...' % point) - for metric in definitions[point]: - print('\t\t... 
and the distance metric "%s":' % metric) - for algorithm in definitions[point][metric]: - print('\t\t\t%s' % algorithm) - - -def get_unique_algorithms(definition_file): - definitions = _get_definitions(definition_file) - algos = set() - for point in definitions: - for metric in definitions[point]: - for algorithm in definitions[point][metric]: - algos.add(algorithm) - return list(sorted(algos)) - - -def get_definitions(definition_file, dimension, dataset, - distance_metric="euclidean", count=10): - - definitions = _get_definitions(definition_file) - - algorithm_definitions = {} - if "any" in definitions: - algorithm_definitions.update(definitions["any"]) - if dataset in definitions: - algorithm_definitions.update(definitions[dataset]) - - definitions = [] - for (name, algo) in algorithm_definitions.items(): - for k in ['docker-tag', 'module', 'constructor']: - if k not in algo: - raise Exception( - 'algorithm %s does not define a "%s" property' % (name, k)) - - base_args = [] - if "base-args" in algo: - base_args = algo["base-args"] - - for run_group in algo["run-groups"].values(): - if "arg-groups" in run_group: - groups = [] - for arg_group in run_group["arg-groups"]: - if isinstance(arg_group, dict): - # Dictionaries need to be expanded into lists in order - # for the subsequent call to _generate_combinations to - # do the right thing - groups.append(_generate_combinations(arg_group)) - else: - groups.append(arg_group) - args = _generate_combinations(groups) - elif "args" in run_group: - args = _generate_combinations(run_group["args"]) - else: - assert False, "? what? %s" % run_group - - if "query-arg-groups" in run_group: - groups = [] - for arg_group in run_group["query-arg-groups"]: - if isinstance(arg_group, dict): - groups.append(_generate_combinations(arg_group)) - else: - groups.append(arg_group) - query_args = _generate_combinations(groups) - elif "query-args" in run_group: - query_args = _generate_combinations(run_group["query-args"]) - else: - query_args = [] - - for arg_group in args: - aargs = [] - aargs.extend(base_args) - if isinstance(arg_group, list): - aargs.extend(arg_group) - else: - aargs.append(arg_group) - - vs = { - "@count": count, - "@metric": distance_metric, - "@dimension": dimension - } - aargs = [_substitute_variables(arg, vs) for arg in aargs] - definitions.append(Definition( - algorithm=name, - docker_tag=algo['docker-tag'], - module=algo['module'], - constructor=algo['constructor'], - arguments=aargs, - query_argument_groups=query_args, - disabled=algo.get('disabled', False) - )) - - return definitions diff --git a/benchmark/algorithms/diskann-t2.py b/benchmark/algorithms/diskann-t2.py deleted file mode 100755 index 24daaa023..000000000 --- a/benchmark/algorithms/diskann-t2.py +++ /dev/null @@ -1,222 +0,0 @@ -from __future__ import absolute_import -import psutil -import os -import time -import numpy as np -import diskannpy - -from benchmark.algorithms.base import BaseANN -from benchmark.datasets import DATASETS, download_accelerated - -class Diskann(BaseANN): - def __init__(self, metric, index_params): - self.name = "DiskANN" - if (index_params.get("R")==None): - print("Error: missing parameter R") - return - if (index_params.get("L")==None): - print("Error: missing parameter L") - return - if (index_params.get("B")==None): - print("Error: missing parameter B") - return - if(index_params.get("M")==None): - print("Error: missing parameter M") - return - - self._index_params = index_params - self._metric = metric - - self.R = index_params.get("R") - self.L 
= index_params.get("L") - self.B = index_params.get("B") - self.M = index_params.get("M") - self.PQ = 0 if index_params.get("PQ") == None else index_params.get("PQ") - self.C = -1 if index_params.get("C") == None else index_params.get("C") - self.cache_mechanism = 1 if index_params.get("CM") == None else index_params.get("CM") - if self.C == 0: - self.cache_mechanism = 0 - print(self.PQ) - - def track(self): - return "T2" - - def index_name(self): - if self.PQ == 0: - return f"R{self.R}_L{self.L}_B{self.B}_M{self.M}" - else: - return f"R{self.R}_L{self.L}_B{self.B}_M{self.M}_PQ{self.PQ}" - - def create_index_dir(self, dataset): - index_dir = os.path.join(os.getcwd(), "data", "indices") - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, "T2") - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, self.__str__()) - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, dataset.short_name()) - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, self.index_name()) - os.makedirs(index_dir, mode=0o777, exist_ok=True) - return index_dir - - def fit(self, dataset): - """ - Build the index for the data points given in dataset name. - """ - - ds = DATASETS[dataset]() - d = ds.d - - buildthreads = self._index_params.get("buildthreads", -1) - if buildthreads == -1: - buildthreads = diskannpy.omp_get_max_threads() - - print("Set build-time number of threads:", buildthreads) - diskannpy.omp_set_num_threads(buildthreads) - - index_dir = self.create_index_dir(ds) - self.index_path = os.path.join(index_dir, self.index_name()) - - if ds.distance() == "euclidean": - metric = diskannpy.L2 - elif ds.distance() == "ip": - metric = diskannpy.INNER_PRODUCT - else: - print("Unsuported distance function.") - return False - - - if not hasattr(self, 'index'): - if ds.dtype == "float32": - self.index = diskannpy.DiskANNFloatIndex(metric) - elif ds.dtype == "int8": - self.index = diskannpy.DiskANNInt8Index(metric) - elif ds.dtype == "uint8": - self.index = diskannpy.DiskANNUInt8Index(metric) - else: - print ("Unsupported data type.") - return False - - start = time.time() - if self.PQ > 0: - self.index.build(ds.get_dataset_fn(), self.index_path, self.R, self.L, self.B, self.M, buildthreads, self.PQ) - else: - self.index.build(ds.get_dataset_fn(), self.index_path, self.R, self.L, self.B, self.M, buildthreads) - end = time.time() - print("DiskANN index built in %.3f s" % (end - start)) - - - if self.C > 0: - num_nodes_to_cache = self.C - else: - num_nodes_to_cache = int(ds.nb/1000) if ds.nb > 1000000 else int(ds.nb/100) - print(f"Loading index and caching {num_nodes_to_cache} nodes..") - self.index.load_index(self.index_path, diskannpy.omp_get_max_threads(), num_nodes_to_cache, self.cache_mechanism) - - def get_index_components(self, dataset): - index_components = [ - '_pq_pivots.bin', '_pq_pivots.bin_centroid.bin', '_pq_pivots.bin_chunk_offsets.bin', - '_pq_pivots.bin_rearrangement_perm.bin', '_sample_data.bin', '_sample_ids.bin', - '_pq_compressed.bin', '_disk.index' - ] - ds = DATASETS[dataset]() - if ds.distance() == "ip": - index_components = index_components + [ - '_disk.index_centroids.bin', '_disk.index_max_base_norm.bin', '_disk.index_medoids.bin' - ] - if self.PQ > 0: - index_components = index_components + [ - '_disk.index_pq_pivots.bin', '_disk.index_pq_pivots.bin_centroid.bin', - '_disk.index_pq_pivots.bin_chunk_offsets.bin', 
'_disk.index_pq_pivots.bin_rearrangement_perm.bin' - ] - return index_components - - def index_files_to_store(self, dataset): - return [self.create_index_dir(DATASETS[dataset]()), self.index_name(), self.get_index_components(dataset)] - - def load_index(self, dataset): - """ - Load the index for dataset. Returns False if index - is not available, True otherwise. - - Checking the index usually involves the dataset name - and the index build paramters passed during construction. - """ - ds = DATASETS[dataset]() - if ds.distance() == "euclidean": - metric = diskannpy.L2 - elif ds.distance() == "ip": - metric = diskannpy.INNER_PRODUCT - else: - print("Unsuported distance function.") - return False - - if ds.dtype == "float32": - self.index = diskannpy.DiskANNFloatIndex(metric) - elif ds.dtype == "int8": - self.index = diskannpy.DiskANNInt8Index(metric) - elif ds.dtype == "uint8": - self.index = diskannpy.DiskANNUInt8Index(metric) - else: - print ("Unsupported data type.") - return False - - index_dir = self.create_index_dir(ds) - if not (os.path.exists(index_dir)) and 'url' not in self._index_params: - return False - - index_path = os.path.join(index_dir, self.index_name()) - index_components = self.get_index_components(dataset) - - for component in index_components: - index_file = index_path + component - if not (os.path.exists(index_file)): - if 'url' in self._index_params: - index_file_source = self._index_params['url'] + '/' + self.index_name() + component - print(f"Downloading index in background. This can take a while.") - download_accelerated(index_file_source, index_file, quiet=True) - else: - return False - - print("Loading index") - - if self.C > 0: - num_nodes_to_cache = self.C - else: - num_nodes_to_cache = int(ds.nb/1000) if ds.nb > 1000000 else int(ds.nb/100) - if (self.index.load_index(index_path, diskannpy.omp_get_max_threads(), num_nodes_to_cache, self.cache_mechanism) == 0): - print ("Load index success.") - return True - else: - return False - - def query(self, X, k): - """Carry out a batch query for k-NN of query set X.""" - nq, dim = (np.shape(X)) - [self.res, self.query_dists], self.stats = self.index.batch_search_numpy_input(X, dim, nq, k, self.Ls, self.BW, self.threads) - self.stats["dist_comps"] = self.stats["mean_dist_comps"] * nq - - def range_query(self, X, radius): - """ - Carry out a batch query for range search with - radius. 
- """ - nq, dim = np.shape(X) - [self.rangeres_lim, [self.rangeres_ids, self.rangeres_dists]], self.stats = self.index.batch_range_search_numpy_input( - X, dim, nq, radius, self.Lmin, self.Lmax, self.BW, self.threads) - self.stats["dist_comps"] = self.stats["mean_dist_comps"] * nq - - def get_range_results(self): - return (self.rangeres_lim, self.rangeres_dists, self.rangeres_ids) - - def get_additional(self): - return self.stats - - def set_query_arguments(self, query_args): - self._query_args = query_args - self.Ls = 0 if query_args.get("Ls") == None else query_args.get("Ls") - self.Lmin = 0 if query_args.get("Lmin") == None else query_args.get("Lmin") - self.Lmax = 0 if query_args.get("Lmax") == None else query_args.get("Lmax") - self.BW = self._query_args.get("BW") - self.threads = self._query_args.get("T") diff --git a/benchmark/algorithms/diskann-v0_3.py b/benchmark/algorithms/diskann-v0_3.py deleted file mode 100644 index d1b7e0281..000000000 --- a/benchmark/algorithms/diskann-v0_3.py +++ /dev/null @@ -1,222 +0,0 @@ -from __future__ import absolute_import -import psutil -import os -import time -import numpy as np -import diskannpy - -from benchmark.algorithms.base import BaseANN -from benchmark.datasets import DATASETS, download_accelerated - -class Diskann(BaseANN): - def __init__(self, metric, index_params): - self.name = "DiskANN-v0.3" - if (index_params.get("R")==None): - print("Error: missing parameter R") - return - if (index_params.get("L")==None): - print("Error: missing parameter L") - return - if (index_params.get("B")==None): - print("Error: missing parameter B") - return - if(index_params.get("M")==None): - print("Error: missing parameter M") - return - - self._index_params = index_params - self._metric = metric - - self.R = index_params.get("R") - self.L = index_params.get("L") - self.B = index_params.get("B") - self.M = index_params.get("M") - self.PQ = 0 if index_params.get("PQ") == None else index_params.get("PQ") - self.C = -1 if index_params.get("C") == None else index_params.get("C") - self.cache_mechanism = 1 if index_params.get("CM") == None else index_params.get("CM") - if self.C == 0: - self.cache_mechanism = 0 - print(self.PQ) - - def track(self): - return "T2" - - def index_name(self): - if self.PQ == 0: - return f"R{self.R}_L{self.L}_B{self.B}_M{self.M}" - else: - return f"R{self.R}_L{self.L}_B{self.B}_M{self.M}_PQ{self.PQ}" - - def create_index_dir(self, dataset): - index_dir = os.path.join(os.getcwd(), "data", "indices") - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, "T2") - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, self.__str__()) - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, dataset.short_name()) - os.makedirs(index_dir, mode=0o777, exist_ok=True) - index_dir = os.path.join(index_dir, self.index_name()) - os.makedirs(index_dir, mode=0o777, exist_ok=True) - return index_dir - - def fit(self, dataset): - """ - Build the index for the data points given in dataset name. 
- """ - - ds = DATASETS[dataset]() - d = ds.d - - buildthreads = self._index_params.get("buildthreads", -1) - if buildthreads == -1: - buildthreads = diskannpy.omp_get_max_threads() - - print("Set build-time number of threads:", buildthreads) - diskannpy.omp_set_num_threads(buildthreads) - - index_dir = self.create_index_dir(ds) - self.index_path = os.path.join(index_dir, self.index_name()) - - if ds.distance() == "euclidean": - metric = diskannpy.L2 - elif ds.distance() == "ip": - metric = diskannpy.INNER_PRODUCT - else: - print("Unsuported distance function.") - return False - - - if not hasattr(self, 'index'): - if ds.dtype == "float32": - self.index = diskannpy.DiskANNFloatIndex(metric) - elif ds.dtype == "int8": - self.index = diskannpy.DiskANNInt8Index(metric) - elif ds.dtype == "uint8": - self.index = diskannpy.DiskANNUInt8Index(metric) - else: - print ("Unsupported data type.") - return False - - start = time.time() - if self.PQ > 0: - self.index.build(ds.get_dataset_fn(), self.index_path, self.R, self.L, self.B, self.M, buildthreads, self.PQ) - else: - self.index.build(ds.get_dataset_fn(), self.index_path, self.R, self.L, self.B, self.M, buildthreads) - end = time.time() - print("DiskANN index built in %.3f s" % (end - start)) - - - if self.C > 0: - num_nodes_to_cache = self.C - else: - num_nodes_to_cache = int(ds.nb/1000) if ds.nb > 1000000 else int(ds.nb/100) - print(f"Loading index and caching {num_nodes_to_cache} nodes..") - self.index.load_index(self.index_path, diskannpy.omp_get_max_threads(), num_nodes_to_cache, self.cache_mechanism) - - def get_index_components(self, dataset): - index_components = [ - '_pq_pivots.bin', '_pq_pivots.bin_centroid.bin', '_pq_pivots.bin_chunk_offsets.bin', - '_pq_pivots.bin_rearrangement_perm.bin', '_sample_data.bin', '_sample_ids.bin', - '_pq_compressed.bin', '_disk.index', '_disk.index_medoids.bin' - ] - ds = DATASETS[dataset]() - if ds.distance() == "ip": - index_components = index_components + [ - '_disk.index_centroids.bin', '_disk.index_max_base_norm.bin', '_disk.index_medoids.bin' - ] - if self.PQ > 0: - index_components = index_components + [ - '_disk.index_pq_pivots.bin', '_disk.index_pq_pivots.bin_centroid.bin', - '_disk.index_pq_pivots.bin_chunk_offsets.bin', '_disk.index_pq_pivots.bin_rearrangement_perm.bin' - ] - return index_components - - def index_files_to_store(self, dataset): - return [self.create_index_dir(DATASETS[dataset]()), self.index_name(), self.get_index_components(dataset)] - - def load_index(self, dataset): - """ - Load the index for dataset. Returns False if index - is not available, True otherwise. - - Checking the index usually involves the dataset name - and the index build paramters passed during construction. 
- """ - ds = DATASETS[dataset]() - if ds.distance() == "euclidean": - metric = diskannpy.L2 - elif ds.distance() == "ip": - metric = diskannpy.INNER_PRODUCT - else: - print("Unsuported distance function.") - return False - - if ds.dtype == "float32": - self.index = diskannpy.DiskANNFloatIndex(metric) - elif ds.dtype == "int8": - self.index = diskannpy.DiskANNInt8Index(metric) - elif ds.dtype == "uint8": - self.index = diskannpy.DiskANNUInt8Index(metric) - else: - print ("Unsupported data type.") - return False - - index_dir = self.create_index_dir(ds) - if not (os.path.exists(index_dir)) and 'url' not in self._index_params: - return False - - index_path = os.path.join(index_dir, self.index_name()) - index_components = self.get_index_components(dataset) - - for component in index_components: - index_file = index_path + component - if not (os.path.exists(index_file)): - if 'url' in self._index_params: - index_file_source = self._index_params['url'] + '/' + self.index_name() + component - print(f"Downloading index in background. This can take a while.") - download_accelerated(index_file_source, index_file, quiet=True) - else: - return False - - print("Loading index") - - if self.C > 0: - num_nodes_to_cache = self.C - else: - num_nodes_to_cache = int(ds.nb/1000) if ds.nb > 1000000 else int(ds.nb/100) - if (self.index.load_index(index_path, diskannpy.omp_get_max_threads(), num_nodes_to_cache, self.cache_mechanism) == 0): - print ("Load index success.") - return True - else: - return False - - def query(self, X, k): - """Carry out a batch query for k-NN of query set X.""" - nq, dim = (np.shape(X)) - [self.res, self.query_dists], self.stats = self.index.batch_search_numpy_input(X, dim, nq, k, self.Ls, self.BW, self.threads) - self.stats["dist_comps"] = self.stats["mean_dist_comps"] * nq - - def range_query(self, X, radius): - """ - Carry out a batch query for range search with - radius. 
- """ - nq, dim = np.shape(X) - [self.rangeres_lim, [self.rangeres_ids, self.rangeres_dists]], self.stats = self.index.batch_range_search_numpy_input( - X, dim, nq, radius, self.Lmin, self.Lmax, self.BW, self.threads) - self.stats["dist_comps"] = self.stats["mean_dist_comps"] * nq - - def get_range_results(self): - return (self.rangeres_lim, self.rangeres_dists, self.rangeres_ids) - - def get_additional(self): - return self.stats - - def set_query_arguments(self, query_args): - self._query_args = query_args - self.Ls = 0 if query_args.get("Ls") == None else query_args.get("Ls") - self.Lmin = 0 if query_args.get("Lmin") == None else query_args.get("Lmin") - self.Lmax = 0 if query_args.get("Lmax") == None else query_args.get("Lmax") - self.BW = self._query_args.get("BW") - self.threads = self._query_args.get("T") diff --git a/benchmark/algorithms/elastiknn.py b/benchmark/algorithms/elastiknn.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/benchmark/algorithms/faiss_inmem.py b/benchmark/algorithms/faiss_inmem.py deleted file mode 100644 index 4c5803edb..000000000 --- a/benchmark/algorithms/faiss_inmem.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import absolute_import -#import sys -#sys.path.append("install/lib-faiss") # noqa -import numpy -import sklearn.preprocessing -import ctypes -import faiss -import os -from benchmark.algorithms.base import BaseANN -from benchmark.datasets import DATASETS - - -class Faiss(BaseANN): - def query(self, X, n): - if self._metric == 'angular': - X /= numpy.linalg.norm(X) - self.res = self.index.search(X.astype(numpy.float32), n) - - def get_results(self): - D, I = self.res - return I -# res = [] -# for i in range(len(D)): -# r = [] -# for l, d in zip(L[i], D[i]): -# if l != -1: -# r.append(l) -# res.append(r) -# return res - - -class FaissIVF(Faiss): - def __init__(self, metric, n_list): - self._n_list = n_list - self._metric = metric - - def index_name(self, name): - return f"data/ivf_{name}_{self._n_list}_{self._metric}" - - def fit(self, dataset): - X = DATASETS[dataset]().get_dataset() # assumes it fits into memory - - if self._metric == 'angular': - X = sklearn.preprocessing.normalize(X, axis=1, norm='l2') - - if X.dtype != numpy.float32: - X = X.astype(numpy.float32) - - self.quantizer = faiss.IndexFlatL2(X.shape[1]) - index = faiss.IndexIVFFlat( - self.quantizer, X.shape[1], self._n_list, faiss.METRIC_L2) - index.train(X) - index.add(X) - faiss.write_index(index, self.index_name(dataset)) - self.index = index - - def load_index(self, dataset): - if not os.path.exists(self.index_name(dataset)): - return False - - self.index = faiss.read_index(self.index_name(dataset)) - return True - - def set_query_arguments(self, n_probe): - faiss.cvar.indexIVF_stats.reset() - self._n_probe = n_probe - self.index.nprobe = self._n_probe - - def get_additional(self): - return {"dist_comps": faiss.cvar.indexIVF_stats.ndis + # noqa - faiss.cvar.indexIVF_stats.nq * self._n_list} - - def __str__(self): - return 'FaissIVF(n_list=%d, n_probe=%d)' % (self._n_list, - self._n_probe) diff --git a/benchmark/algorithms/faiss_t1.py b/benchmark/algorithms/faiss_t1.py deleted file mode 100644 index 75684c427..000000000 --- a/benchmark/algorithms/faiss_t1.py +++ /dev/null @@ -1,314 +0,0 @@ -from __future__ import absolute_import -import numpy as np -import sklearn.preprocessing -import ctypes -import faiss -import os -import time -from benchmark.algorithms.base import BaseANN -from benchmark.datasets import DATASETS, download_accelerated - -def 
knn_search_batched(index, xq, k, bs): - D, I = [], [] - for i0 in range(0, len(xq), bs): - Di, Ii = index.search(xq[i0:i0 + bs], k) - D.append(Di) - I.append(Ii) - return np.vstack(D), np.vstack(I) - -def unwind_index_ivf(index): - if isinstance(index, faiss.IndexPreTransform): - assert index.chain.size() == 1 - vt = faiss.downcast_VectorTransform(index.chain.at(0)) - index_ivf, vt2 = unwind_index_ivf(faiss.downcast_index(index.index)) - assert vt2 is None - return index_ivf, vt - if hasattr(faiss, "IndexRefine") and isinstance(index, faiss.IndexRefine): - return unwind_index_ivf(faiss.downcast_index(index.base_index)) - if isinstance(index, faiss.IndexIVF): - return index, None - else: - return None, None - -def two_level_clustering(xt, nc1, nc2, clustering_niter=25, spherical=False): - d = xt.shape[1] - - print(f"2-level clustering of {xt.shape} nb clusters = {nc1}*{nc2} = {nc1*nc2}") - print("perform coarse training") - - km = faiss.Kmeans( - d, nc1, verbose=True, niter=clustering_niter, - max_points_per_centroid=2000, - spherical=spherical - ) - km.train(xt) - - print() - - # coarse centroids - centroids1 = km.centroids - - print("assigning the training set") - t0 = time.time() - _, assign1 = km.assign(xt) - bc = np.bincount(assign1, minlength=nc1) - print(f"done in {time.time() - t0:.2f} s. Sizes of clusters {min(bc)}-{max(bc)}") - o = assign1.argsort() - del km - - # train sub-clusters - i0 = 0 - c2 = [] - t0 = time.time() - for c1 in range(nc1): - print(f"[{time.time() - t0:.2f} s] training sub-cluster {c1}/{nc1}\r", end="", flush=True) - i1 = i0 + bc[c1] - subset = o[i0:i1] - assert np.all(assign1[subset] == c1) - km = faiss.Kmeans(d, nc2, spherical=spherical) - xtsub = xt[subset] - km.train(xtsub) - c2.append(km.centroids) - i0 = i1 - print(f"done in {time.time() - t0:.2f} s") - return np.vstack(c2) - - -class Faiss(BaseANN): - def __init__(self, metric, index_params): - self._index_params = index_params - self._metric = metric - self._query_bs = -1 - self.indexkey = index_params.get("indexkey", "OPQ32_128,IVF65536_HNSW32,PQ32") - - if 'query_bs' in index_params: - self._query_bs = index_params['query_bs'] - - def track(self): - return "T1" - - def index_name(self, name): - return f"data/{name}.{self.indexkey}.faissindex" - - def fit(self, dataset): - index_params = self._index_params - - ds = DATASETS[dataset]() - d = ds.d - - # get build parameters - buildthreads = index_params.get("buildthreads", -1) - by_residual = index_params.get("by_residual", -1) - maxtrain = index_params.get("maxtrain", 0) - clustering_niter = index_params.get("clustering_niter", -1) - add_bs = index_params.get("add_bs", 100000) - add_splits = index_params.get("add_splits", 1) - efSearch = index_params.get("quantizer_add_efSearch", 80) - efConstruction = index_params.get("quantizer_efConstruction", 200) - use_two_level_clustering = index_params.get("two_level_clustering", True) - indexfile = self.index_name(dataset) - - if buildthreads == -1: - print("Build-time number of threads:", faiss.omp_get_max_threads()) - else: - print("Set build-time number of threads:", buildthreads) - faiss.omp_set_num_threads(buildthreads) - - metric_type = ( - faiss.METRIC_L2 if ds.distance() == "euclidean" else - faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else - 1/0 - ) - index = faiss.index_factory(d, self.indexkey, metric_type) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - else: - vec_transform = 
faiss.downcast_VectorTransform(vec_transform) - - if by_residual != -1: - by_residual = by_residual == 1 - print("setting by_residual = ", by_residual) - index_ivf.by_residual # check if field exists - index_ivf.by_residual = by_residual - - if index_ivf: - print("Update add-time parameters") - # adjust default parameters used at add time for quantizers - # because otherwise the assignment is inaccurate - quantizer = faiss.downcast_index(index_ivf.quantizer) - if isinstance(quantizer, faiss.IndexRefine): - print(" update quantizer k_factor=", quantizer.k_factor, end=" -> ") - quantizer.k_factor = 32 if index_ivf.nlist < 1e6 else 64 - print(quantizer.k_factor) - base_index = faiss.downcast_index(quantizer.base_index) - if isinstance(base_index, faiss.IndexIVF): - print(" update quantizer nprobe=", base_index.nprobe, end=" -> ") - base_index.nprobe = ( - 16 if base_index.nlist < 1e5 else - 32 if base_index.nlist < 4e6 else - 64) - print(base_index.nprobe) - elif isinstance(quantizer, faiss.IndexHNSW): - print(" update quantizer efSearch=", quantizer.hnsw.efSearch, end=" -> ") - if index_params.get("quantizer_add_efSearch", 80) > 0: - quantizer.hnsw.efSearch = efSearch - else: - quantizer.hnsw.efSearch = 40 if index_ivf.nlist < 4e6 else 64 - print(quantizer.hnsw.efSearch) - if efConstruction != -1: - print(" update quantizer efConstruction=", quantizer.hnsw.efConstruction, end=" -> ") - quantizer.hnsw.efConstruction = efConstruction - print(quantizer.hnsw.efConstruction) - - - index.verbose = True - if index_ivf: - index_ivf.verbose = True - index_ivf.quantizer.verbose = True - index_ivf.cp.verbose = True - - - if maxtrain == 0: - if 'IMI' in self.indexkey: - maxtrain = int(256 * 2 ** (np.log2(index_ivf.nlist) / 2)) - elif index_ivf: - maxtrain = 50 * index_ivf.nlist - else: - # just guess... 
- maxtrain = 256 * 100 - maxtrain = max(maxtrain, 256 * 100) - print("setting maxtrain to %d" % maxtrain) - - # train on dataset - print(f"getting first {maxtrain} dataset vectors for training") - - xt2 = next(ds.get_dataset_iterator(bs=maxtrain)) - - print("train, size", xt2.shape) - assert np.all(np.isfinite(xt2)) - - t0 = time.time() - - if (isinstance(vec_transform, faiss.OPQMatrix) and - isinstance(index_ivf, faiss.IndexIVFPQFastScan)): - print(" Forcing OPQ training PQ to PQ4") - ref_pq = index_ivf.pq - training_pq = faiss.ProductQuantizer( - ref_pq.d, ref_pq.M, ref_pq.nbits - ) - vec_transform.pq - vec_transform.pq = training_pq - - if clustering_niter >= 0: - print(("setting nb of clustering iterations to %d" % - clustering_niter)) - index_ivf.cp.niter = clustering_niter - - train_index = None - - if use_two_level_clustering: - sqrt_nlist = int(np.sqrt(index_ivf.nlist)) - assert sqrt_nlist ** 2 == index_ivf.nlist - - centroids_trainset = xt2 - if isinstance(vec_transform, faiss.VectorTransform): - print(" training vector transform") - vec_transform.train(xt2) - print(" transform trainset") - centroids_trainset = vec_transform.apply_py(centroids_trainset) - - centroids = two_level_clustering( - centroids_trainset, sqrt_nlist, sqrt_nlist, - spherical=(metric_type == faiss.METRIC_INNER_PRODUCT) - ) - - if not index_ivf.quantizer.is_trained: - print(" training quantizer") - index_ivf.quantizer.train(centroids) - - print(" add centroids to quantizer") - index_ivf.quantizer.add(centroids) - - index.train(xt2) - print(" Total train time %.3f s" % (time.time() - t0)) - - if train_index is not None: - del train_index - index_ivf.clustering_index = None - gc.collect() - - print("adding") - - t0 = time.time() - add_bs = index_params.get("add_bs", 10000000) - if add_bs == -1: - index.add(ds.get_database()) - else: - i0 = 0 - for xblock in ds.get_dataset_iterator(bs=add_bs): - i1 = i0 + len(xblock) - print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % ( - i0, i1, ds.nb, time.time() - t0, - faiss.get_mem_usage_kb())) - index.add(xblock) - i0 = i1 - - print(" add in %.3f s" % (time.time() - t0)) - print("storing", ) - faiss.write_index(index, self.index_name(dataset)) - - self.index = index - self.ps = faiss.ParameterSpace() - self.ps.initialize(self.index) - - def load_index(self, dataset): - if not os.path.exists(self.index_name(dataset)): - if 'url' not in self._index_params: - return False - - print('Downloading index in background. This can take a while.') - download_accelerated(self._index_params['url'], self.index_name(dataset), quiet=True) - - print("Loading index") - - self.index = faiss.read_index(self.index_name(dataset)) - - self.ps = faiss.ParameterSpace() - self.ps.initialize(self.index) - - return True - - def set_query_arguments(self, query_args): - faiss.cvar.indexIVF_stats.reset() - self.ps.set_index_parameters(self.index, query_args) - self.qas = query_args - - - # shall we return something interesting here? 
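The fit()/set_query_arguments() code above is a heavily tuned, out-of-core version of the standard Faiss IVF lifecycle. A much-condensed sketch of that lifecycle on toy in-memory data, purely for orientation; the factory string, sizes and nprobe value are arbitrary examples, not the settings used by this wrapper:

    import numpy as np
    import faiss

    d = 64
    xb = np.random.rand(100_000, d).astype('float32')   # toy database
    xq = np.random.rand(10, d).astype('float32')        # toy queries

    index = faiss.index_factory(d, "IVF1024,PQ32", faiss.METRIC_L2)
    index.train(xb[:20_000])     # train coarse quantizer and PQ on a sample
    index.add(xb)                # the wrapper above streams this in batches from disk
    faiss.ParameterSpace().set_index_parameters(index, "nprobe=16")  # query-time knob
    D, I = index.search(xq, 10)  # top-10 ids and distances per query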
- def get_additional(self): - return {"dist_comps": faiss.cvar.indexIVF_stats.ndis} - - def __str__(self): - return f'FaissIVFPQ({self.qas})' - - - - def query(self, X, n): - if self._query_bs == -1: - self.res = self.index.search(X, n) - else: - self.res = knn_search_batched(self.index, X, n, self._query_bs) - - def range_query(self, X, radius): - if self._query_bs != -1: - raise NotImplemented - self.res = self.index.range_search(X, radius) - - def get_results(self): - D, I = self.res - return I - - def get_range_results(self): - return self.res diff --git a/benchmark/algorithms/faiss_t3.py b/benchmark/algorithms/faiss_t3.py deleted file mode 100644 index 622e121b1..000000000 --- a/benchmark/algorithms/faiss_t3.py +++ /dev/null @@ -1,475 +0,0 @@ -from __future__ import absolute_import -import numpy as np -import sklearn.preprocessing -import ctypes -import faiss -import os -import time -import gc -import resource -import threading -import json - -from multiprocessing.pool import ThreadPool - -from benchmark.algorithms.base import BaseANN -from benchmark.datasets import DATASETS, download_accelerated - -def unwind_index_ivf(index): - if isinstance(index, faiss.IndexPreTransform): - assert index.chain.size() == 1 - vt = index.chain.at(0) - index_ivf, vt2 = unwind_index_ivf(faiss.downcast_index(index.index)) - assert vt2 is None - return index_ivf, vt - if hasattr(faiss, "IndexRefine") and isinstance(index, faiss.IndexRefine): - return unwind_index_ivf(faiss.downcast_index(index.base_index)) - if isinstance(index, faiss.IndexIVF): - return index, None - else: - return None, None - -def rate_limited_iter(l): - 'a thread pre-processes the next element' - pool = ThreadPool(1) - res = None - - def next_or_None(): - try: - return next(l) - except StopIteration: - return None - - while True: - res_next = pool.apply_async(next_or_None) - if res is not None: - res = res.get() - if res is None: - return - yield res - res = res_next - -def build_index(buildthreads, by_residual, maxtrain, clustering_niter, - indexkey, indexfile, add_bs, add_splits, ds, train_on_gpu=True, quantizer_on_gpu_add=True): - - nq, d = ds.nq, ds.d - nb, d = ds.nq, ds.d - - if buildthreads == -1: - print("Build-time number of threads:", faiss.omp_get_max_threads()) - else: - print("Set build-time number of threads:", buildthreads) - faiss.omp_set_num_threads(buildthreads) - - metric_type = ( - faiss.METRIC_L2 if ds.distance() == "euclidean" else - faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else - 1/0 - ) - index = faiss.index_factory(d, indexkey, metric_type) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - else: - vec_transform = faiss.downcast_VectorTransform(vec_transform) - - if by_residual != -1: - by_residual = by_residual == 1 - index_ivf.by_residual # check if field exists - index_ivf.by_residual = by_residual - - if index_ivf: - print("Update add-time parameters") - # adjust default parameters used at add time for quantizers - # because otherwise the assignment is inaccurate - quantizer = faiss.downcast_index(index_ivf.quantizer) - if isinstance(quantizer, faiss.IndexRefine): - print(" update quantizer k_factor=", quantizer.k_factor, end=" -> ") - quantizer.k_factor = 32 if index_ivf.nlist < 1e6 else 64 - print(quantizer.k_factor) - base_index = faiss.downcast_index(quantizer.base_index) - if isinstance(base_index, faiss.IndexIVF): - print(" update quantizer nprobe=", base_index.nprobe, end=" -> ") - base_index.nprobe = ( - 16 if 
base_index.nlist < 1e5 else - 32 if base_index.nlist < 4e6 else - 64) - print(base_index.nprobe) - - index.verbose = True - if index_ivf: - index_ivf.verbose = True - index_ivf.quantizer.verbose = True - index_ivf.cp.verbose = True - - if maxtrain == 0: - if 'IMI' in indexkey: - maxtrain = int(256 * 2 ** (np.log2(index_ivf.nlist) / 2)) - elif index_ivf: - maxtrain = 50 * index_ivf.nlist - else: - # just guess... - maxtrain = 256 * 100 - maxtrain = max(maxtrain, 256 * 100) - print("setting maxtrain to %d" % maxtrain) - - # train on dataset - print(f"getting first {maxtrain} dataset vectors for training") - - xt2 = next(ds.get_dataset_iterator(bs=maxtrain)) - - print("train, size", xt2.shape) - assert np.all(np.isfinite(xt2)) - - t0 = time.time() - - if (isinstance(vec_transform, faiss.OPQMatrix) and - isinstance(index_ivf, faiss.IndexIVFPQFastScan)): - print(" Forcing OPQ training PQ to PQ4") - ref_pq = index_ivf.pq - training_pq = faiss.ProductQuantizer( - ref_pq.d, ref_pq.M, ref_pq.nbits - ) - vec_transform.pq - vec_transform.pq = training_pq - - if clustering_niter >= 0: - print(("setting nb of clustering iterations to %d" % - clustering_niter)) - index_ivf.cp.niter = clustering_niter - - train_index = None - if train_on_gpu: - print("add a training index on GPU") - train_index = faiss.index_cpu_to_all_gpus( - faiss.IndexFlatL2(index_ivf.d)) - index_ivf.clustering_index = train_index - - index.train(xt2) - print(" Total train time %.3f s" % (time.time() - t0)) - - if train_index is not None: - del train_index - index_ivf.clustering_index = None - gc.collect() - - print("adding") - - t0 = time.time() - - if not quantizer_on_gpu_add: - i0 = 0 - for xblock in ds.get_dataset_iterator(bs=add_bs): - i1 = i0 + len(xblock) - print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % ( - i0, i1, ds.nb, time.time() - t0, - faiss.get_mem_usage_kb())) - index.add(xblock) - i0 = i1 - elif True: - quantizer_gpu = faiss.index_cpu_to_all_gpus(index_ivf.quantizer) - - nsplit = add_splits - - def produce_batches(sno): - for xblock in ds.get_dataset_iterator(bs=add_bs, split=(nsplit, sno)): - _, assign = quantizer_gpu.search(xblock, 1) - yield xblock, assign.ravel() - - i0 = 0 - for sno in range(nsplit): - print(f"============== SPLIT {sno}/{nsplit}") - - stage2 = rate_limited_iter(produce_batches(sno)) - for xblock, assign in stage2: - i1 = i0 + len(xblock) - print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % ( - i0, i1, ds.nb, time.time() - t0, - faiss.get_mem_usage_kb())) - index.add_core( - len(xblock), - faiss.swig_ptr(xblock), - None, - faiss.swig_ptr(assign) - ) - i0 = i1 - del quantizer_gpu - gc.collect() - - print(" add in %.3f s" % (time.time() - t0)) - if indexfile: - print("storing", indexfile) - faiss.write_index(index, indexfile) - - return index - -class IndexQuantizerOnGPU: - """ run query quantization on GPU """ - - def __init__(self, index, search_bs): - self.search_bs = search_bs - index_ivf, vec_transform = unwind_index_ivf(index) - self.index_ivf = index_ivf - if vec_transform: -# print(type(vec_transform),dir(vec_transform)) - self.vec_transform = vec_transform.apply - else: - self.vec_transform = None - self.quantizer_gpu = faiss.index_cpu_to_all_gpus(self.index_ivf.quantizer) - - - def produce_batches(self, x, bs): - n = len(x) - nprobe = self.index_ivf.nprobe - ivf_stats = faiss.cvar.indexIVF_stats - for i0 in range(0, n, bs): - xblock = x[i0:i0 + bs] - t0 = time.time() - D, I = self.quantizer_gpu.search(xblock, nprobe) - ivf_stats.quantization_time += 1000 * (time.time() - t0) - yield i0, 
xblock, D, I - - - def search(self, x, k): - - if x.dtype!=np.float32: #GW- why do we need this now? - x = x.astype( np.float32 ) - - bs = self.search_bs - if self.vec_transform: - x = self.vec_transform(x) - nprobe = self.index_ivf.nprobe - n, d = x.shape - assert self.index_ivf.d == d - D = np.empty((n, k), dtype=np.float32) - I = np.empty((n, k), dtype=np.int64) - - sp = faiss.swig_ptr - stage2 = rate_limited_iter(self.produce_batches(x, bs)) - t0 = time.time() - for i0, xblock, Dc, Ic in stage2: - ni = len(xblock) - self.index_ivf.search_preassigned( - ni, faiss.swig_ptr(xblock), - k, sp(Ic), sp(Dc), - sp(D[i0:]), sp(I[i0:]), - False - ) - - return D, I - - def range_search(self, x, radius): - - x = x.astype( np.float32 ) #GW - why do we need this now? - - bs = self.search_bs - if self.vec_transform: - x = self.vec_transform(x) - nprobe = self.index_ivf.nprobe - n, d = x.shape - assert self.index_ivf.d == d - - sp = faiss.swig_ptr - rsp = faiss.rev_swig_ptr - stage2 = rate_limited_iter(self.produce_batches(x, bs)) - t0 = time.time() - all_res = [] - nres = 0 - for i0, xblock, Dc, Ic in stage2: - ni = len(xblock) - res = faiss.RangeSearchResult(ni) - - self.index_ivf.range_search_preassigned( - ni, faiss.swig_ptr(xblock), - radius, sp(Ic), sp(Dc), - res - ) - all_res.append((ni, res)) - lims = rsp(res.lims, ni + 1) - nres += lims[-1] - nres = int(nres) - lims = np.zeros(n + 1, int) - I = np.empty(nres, int) - D = np.empty(nres, 'float32') - - n0 = 0 - for ni, res in all_res: - lims_i = rsp(res.lims, ni + 1) - nd = int(lims_i[-1]) - Di = rsp(res.distances, nd) - Ii = rsp(res.labels, nd) - i0 = int(lims[n0]) - lims[n0: n0 + ni + 1] = lims_i + i0 - I[i0:i0 + nd] = Ii - D[i0:i0 + nd] = Di - n0 += ni - - return lims, D, I - - -class FaissT3(BaseANN): - def __init__(self, metric, index_params): - self._index_params = index_params - self._metric = metric - - def track(self): - return "T3" - - def index_name(self, name): - return f"data/{name}.{self._index_params['indexkey']}.faissindex" - - def fit(self, dataset): - index_params = self._index_params - - ds = DATASETS[dataset]() - d = ds.d - - # get build parameters - buildthreads = index_params.get("buildthreads", -1) - by_residual = index_params.get("by_residual", -1) - maxtrain = index_params.get("maxtrain", 0) - clustering_niter = index_params.get("clustering_niter", -1) - indexkey = index_params.get("indexkey", "IVF1048576,SQ8") - add_bs = index_params.get("add_bs", 100000) - add_splits = index_params.get("add_splits", 1) - indexfile = self.index_name(dataset) - - # determine how we use the GPU - #search_type = ds.search_type() - #if search_type == "knn": - # train_on_gpu = True - # quantizer_on_gpu_add = True - #else: #range - # train_on_gpu = False - # quantizer_on_gpu_add = False - - index = build_index(buildthreads, by_residual, maxtrain, clustering_niter, indexkey, - indexfile, add_bs, add_splits, ds) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - if index_ivf is not None: - print("imbalance_factor=", index_ivf.invlists.imbalance_factor()) - - no_precomputed_tables = index_params.get("no_precomputed_tables", True) - if no_precomputed_tables: - if isinstance(index_ivf, faiss.IndexIVFPQ): - print("disabling precomputed table") - index_ivf.use_precomputed_table = -1 - index_ivf.precomputed_table.clear() - - precomputed_table_size = 0 - if hasattr(index_ivf, 'precomputed_table'): - precomputed_table_size = index_ivf.precomputed_table.size() * 4 - print("precomputed tables 
size:", precomputed_table_size) - - searchthreads = index_params.get("searchthreads", -1) - if searchthreads == -1: - print("Search threads:", faiss.omp_get_max_threads()) - else: - print("Setting nb of threads to", searchthreads) - faiss.omp_set_num_threads(searchthreads) - - parallel_mode = index_params.get("parallel_mode", 3) - if parallel_mode != -1: - print("setting IVF parallel mode to", parallel_mode) - index_ivf.parallel_mode - index_ivf.parallel_mode = parallel_mode - - # prep for the searches - - self.ps = faiss.ParameterSpace() - self.ps.initialize(index) - - search_bs = index_params.get("search_bs", 8192) - index_wrap = IndexQuantizerOnGPU(index, search_bs) - - self.cpuindex = index - self.index = index_wrap - - def load_index(self, dataset): - - index_params = self._index_params - - if not os.path.exists(self.index_name(dataset)): - if 'url' not in self._index_params: - return False - - print('Downloading index in background. This can take a while.') - download_accelerated(self._index_params['url'], self.index_name(dataset), quiet=True) - - print("Loading index",self.index_name(dataset)) - - index = faiss.read_index(self.index_name(dataset)) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - if index_ivf is not None: - print("imbalance_factor=", index_ivf.invlists.imbalance_factor()) - - no_precomputed_tables = index_params.get("no_precomputed_tables", True) - if no_precomputed_tables: - if isinstance(index_ivf, faiss.IndexIVFPQ): - print("disabling precomputed table") - index_ivf.use_precomputed_table = -1 - index_ivf.precomputed_table.clear() - - precomputed_table_size = 0 - if hasattr(index_ivf, 'precomputed_table'): - precomputed_table_size = index_ivf.precomputed_table.size() * 4 - print("precomputed tables size:", precomputed_table_size) - - # prep for the searches - - searchthreads = index_params.get("searchthreads", -1) - if searchthreads == -1: - print("Search threads:", faiss.omp_get_max_threads()) - else: - print("Setting nb of threads to", searchthreads) - faiss.omp_set_num_threads(searchthreads) - - parallel_mode = index_params.get("parallel_mode", 3) - if parallel_mode != -1: - print("setting IVF parallel mode to", parallel_mode) - index_ivf.parallel_mode - index_ivf.parallel_mode = parallel_mode - - self.ps = faiss.ParameterSpace() - self.ps.initialize(index) - - search_bs = index_params.get("search_bs", 8092) - index_wrap = IndexQuantizerOnGPU(index, search_bs) - - self.cpuindex = index - self.index = index_wrap - - return True - - def set_query_arguments(self, query_args): - faiss.cvar.indexIVF_stats.reset() - self.ps.set_index_parameters(self.cpuindex, query_args) - self.qas = query_args - - - # shall we return something interesting here? 
- def get_additional(self): - return {"dist_comps": faiss.cvar.indexIVF_stats.ndis} - - def __str__(self): - return f'FaissIVFPQ({self.qas})' - - def query(self, X, n): - self.res = self.index.search(X, n) - - def range_query(self, X, radius): - self.res = self.index.range_search(X, radius) - - def get_results(self): - D, I = self.res - return I - - def get_range_results(self): - return self.res diff --git a/benchmark/algorithms/gemini.py b/benchmark/algorithms/gemini.py deleted file mode 100644 index ea261fd95..000000000 --- a/benchmark/algorithms/gemini.py +++ /dev/null @@ -1,279 +0,0 @@ -from __future__ import absolute_import -import numpy as np -import sklearn.preprocessing -import ctypes -import faiss -import os -import time -import ast -from tqdm import tqdm - -from benchmark.algorithms.base import BaseANN -from benchmark.datasets import DATASETS, download_accelerated - -# GSL stuff -import gdl_bindings as gdl -import gsl_bindings as gsl -from tmp_api import * -import gsl_utils -import gsld_bindings_rerank as gsld_rerank - -def convert_index_to_cluster_and_ids_lists(index, nbits): - cluster_list = np.empty(index.invlists.nlist, dtype=object) - ids_list = np.empty(index.invlists.nlist, dtype=object) - - zero_count = 0 - - for i in range(index.invlists.nlist): - list_sz = index.invlists.list_size(i) - - if list_sz == 0: - zero_count = zero_count + 1 - ids = None - else: - ids_ptr = index.invlists.get_ids(i) - ids = np.array(faiss.rev_swig_ptr(ids_ptr, list_sz)).reshape(-1, 1).astype(np.uint32) # GSL requires a 2d arrray for some reason - index.invlists.release_ids(ids_ptr) - # index.invlists.release_ids(list_sz, ids_ptr) - ids_list[i] = ids - - codes_ptr = index.invlists.get_codes(i) - codes = np.array(faiss.rev_swig_ptr(codes_ptr, list_sz * nbits // 8)).reshape(list_sz, nbits//8) - index.invlists.release_codes(codes_ptr) - # index.invlists.release_codes(list_sz * nbits // 8, codes_ptr) - cluster_list[i] = codes - - print('zero_count =', zero_count) - return cluster_list, ids_list - -def get_cluster_and_ids_lists(index, nbits): - print('Creating cluster + ids lists...') - ret = convert_index_to_cluster_and_ids_lists(index, nbits) - return ret - -def create_encoding(encoding_file_name, normalize=False): - print("FILE LOAD PATH", encoding_file_name) - single_np_array = np.load(encoding_file_name) - # contains 6 arrays in a particular order - layers = [NHEncoding.NHLayer(single_np_array[0], single_np_array[1].reshape(1, len(single_np_array[1]))), - NHEncoding.NHLayer(single_np_array[2], single_np_array[3].reshape(1, len(single_np_array[3]))), - NHEncoding.NHLayer(single_np_array[4], single_np_array[5].reshape(1, len(single_np_array[5])))] - [ print(l.shape) for l in single_np_array ] - return NHEncoding(layers, normalize) - -class GeminiT3(BaseANN): - def __init__(self, metric, index_params): - # GSL init - s = gdl.gdl_init() - if s: - raise Exception('gdl.gdl_init() failed with {}'.format(s)) - s, n_gdl_ctxs = gdl.gdl_context_count_get() - if s: - raise Exception('gdl.gdl_context_count_get() failed with {}'.format(s)) - - s, gdl_desc_list = gdl.gdl_context_desc_get(n_gdl_ctxs) - if s: - raise Exception('gdl.gdl_context_desc_get() failed with {}'.format(s)) - - gdl_ctx_ids = [desc.ctx_id for desc in gdl_desc_list if desc.status == gdl.GDL_CONTEXT_READY] - if not gdl_ctx_ids: - raise Exception("No valid context found") - - self._index_params = index_params - self._metric = metric - self.index_params = ast.literal_eval(index_params) - num_apuc = self.index_params['num_apuc'] - 
print("NUM_APUC", num_apuc) - self.gsl_ctx = Context(gdl_ctx_ids[:num_apuc], max_num_threads=56) - # GSL init end - - self.max_num_queries = 10000 - self.num_records = 1000000000 - - print(f'GSI GeminiT3(BaseANN){self.index_params}') - - def index_name(self, name): - nlist = self.index_params['nlist'] - qbits = self.index_params['qbits'] - nbits = self.index_params['nbits'] - nt = self.index_params['nt'] - is_f16 = self.index_params['f16'] - key = "nbits=%d,qbits=%d,nlist=%d,nt=%d,f16=%s" % (nbits, qbits, nlist, nt, str(is_f16)) - return f"data/{name}.{key}.geminiindex" - - def fit(self, dataset): - assert 0 - - def load_index(self, dataset): - - nlist = self.index_params['nlist'] - qbits = self.index_params['qbits'] - nbits = self.index_params['nbits'] - nt = self.index_params['nt'] - is_f16 = self.index_params['f16'] - - # number of centroids maps to an index subdir - centroids_dirs = { 524288: 'centroids_512k/', 2097152: 'centroids_2m/', 4194304: 'centroids_4m/'} - num_centroids_dir = centroids_dirs[nlist] - - # the index name is the parent folder of the index component files - prefix = self.index_name( dataset ) - - resources_path = '' - case_dir = '1b/' - resources_path_case = f'{resources_path}{case_dir}' - - fp_quantizer_file_name = f'{prefix}/{resources_path}{num_centroids_dir}Deep1B.nt{nt}.nlist{nlist}.quantizer' - records_encoding_file_name = f'{prefix}/{resources_path}records_weights/records_weights.bits{nbits}.npy' - centroids_encoding_file_name = f'{prefix}/{resources_path}{num_centroids_dir}centroids_weights.nt{nt}.nlist{nlist}.nbits{nbits}.npy' - index_file_name = f'{prefix}/{resources_path_case}Deep1B.ivfbinnh.nt{nt}.nlist{nlist}.nb{self.num_records}.bits{qbits}.index' - db_path = f'{prefix}/{resources_path_case}fdb.npy' - - print('********************** Paths ***************************') - print('fp_quantizer_file_name =', fp_quantizer_file_name) - if not os.path.isfile(fp_quantizer_file_name): - raise FileNotFoundError(fp_quantizer_file_name) - print('records_encoding_file_name =', records_encoding_file_name) - if not os.path.isfile(records_encoding_file_name): - raise FileNotFoundError(records_encoding_file_name) - print('centroids_encoding_file_name =', centroids_encoding_file_name) - if not os.path.isfile(centroids_encoding_file_name): - raise FileNotFoundError(centroids_encoding_file_name) - print('index_file_name =', index_file_name) - if not os.path.isfile(index_file_name): - raise FileNotFoundError(index_file_name) - print('db_path =', db_path) - if not os.path.isfile(db_path): - raise FileNotFoundError(db_path) - print('********************************************************') - - self.centroids_encoding = create_encoding(centroids_encoding_file_name, False) - print("centroids", centroids_encoding_file_name, self.centroids_encoding) - self.records_encoding = create_encoding(records_encoding_file_name, False) - print("records", records_encoding_file_name, self.records_encoding) - - print('load XF deep-1B') - num_features = 96 - dtype = gsld_rerank.GSLD_RERANK_DATA_TYPE_FLOAT - - print('init rerank...') - self.rerank = gsld_rerank.init(self.num_records, num_features, num_features * 4, dtype, gsld_rerank.GSLD_RERANK_ALGO_L2, is_f16, db_path) - print('finished init rerank') - - print(f'GSI loading index:{index_file_name}') - self.index = faiss.read_index_binary(index_file_name) - - # cluster_list, ids_list = get_cluster_and_ids_lists(self.index, nbits) - cluster_list, ids_list = get_cluster_and_ids_lists(self.index, qbits) - - print('creating GSL cluster binary 
DB...') - self.clstr_bdb = self.gsl_ctx.create_cluster_bdb(cluster_list, ids_list) - del cluster_list - del ids_list - - quantizer = faiss.downcast_IndexBinary(self.index.quantizer) - centroids = faiss.vector_to_array(quantizer.xb) - centroids = np.reshape(centroids, (quantizer.ntotal, quantizer.d//8)) - self.centroids_bdb = self.gsl_ctx.create_bdb(centroids) - del centroids - - l2_quantizer = faiss.read_index(fp_quantizer_file_name) - l2_centroids = faiss.vector_float_to_array(l2_quantizer.xb) - l2_centroids = np.reshape(l2_centroids, (nlist, l2_quantizer.d)) - print('centroids (float):', l2_centroids.shape, l2_centroids.dtype) - print('creating GSL centroids float DB...') - self.centroids_fdb = self.gsl_ctx.create_fdb(l2_centroids, False) - - self.centroids_encoding = create_encoding(centroids_encoding_file_name, False) - print("centroids", centroids_encoding_file_name, self.centroids_encoding) - self.records_encoding = create_encoding(records_encoding_file_name, False) - print("records", records_encoding_file_name, self.records_encoding) - - return True - - def set_query_arguments(self, query_args): - - #destroy previous runs' seesion - try: - print('destroying search session') - self.session_hdl.destroy() - except AttributeError: - print('no session to destroy') - - typical_num_queries = self.max_num_queries - - self.search_params = ast.literal_eval(query_args) - nprobe = self.search_params['nprobe'] - nprobe_refine = self.search_params['nprobe_refine'] - hamming_k = self.search_params['hamming_k'] - average_clstr_size_factor = self.search_params['average_clstr_size_factor'] - - print('--->', 'nprobe =', nprobe, 'nprobe_refine =', nprobe_refine, 'hamming_k =', hamming_k, - 'average_clstr_size_factor =', average_clstr_size_factor) - - rerank_desc = RerankDesc(self.centroids_fdb, nprobe_refine, gsl.GSL_ALG_KNN_L2_FDB) - - desc = ClusterHammingDesc(self.max_num_queries, - typical_num_queries, - self.centroids_bdb, - nprobe, - hamming_k, - rerank_desc, - self.centroids_encoding, - self.records_encoding, - self.clstr_bdb, - average_clstr_size_factor) - - self.session_hdl = self.gsl_ctx.create_session(desc) - print('Created GSL session') - self.gsl_ctx.search_in_focus(self.session_hdl) - print('Set GSL session in focus') - - def get_additional(self): - return {"dist_comps": faiss.cvar.indexIVF_stats.ndis} - - def __str__(self): - return f'GSI:{self.index_params} {self.search_params}' - - def query(self, X, n): - - print('Performing search on GSL') - out_shape = (X.shape[0], self.search_params['hamming_k']) - outputs = ClusterFlatOutputs(np.empty(out_shape, dtype=np.uint32), np.empty(out_shape, dtype=np.float32)) - out_indices, out_distances = self.gsl_ctx.search(ClusterInputs(X), outputs) - print('Finished search on GSL') - - print('run rerank...') - start = time.time() - res_idx, res_val = gsld_rerank.rerank(self.rerank, X, out_indices, n, 56) - end = time.time() - print('rerank time(milisec): ', (end - start) * 1000) - self.res = out_distances.astype(np.int32), res_idx.astype(np.int64) - - def range_query(self, X, radius): - print('in range query <-----') - - def get_results(self): - print('in get_results <-----') - D, I = self.res - return I - - def get_range_results(self): - print('in get_range_results <-----') - return self.res - - def __del__(self): - print('exit rerank...') - gsld_rerank.exit(self.rerank) - print('destroying search session') - self.session_hdl.destroy() - print('destroying centroids float DB') - self.centroids_fdb.destroy() - print('destroying centroids binary DB') - 
self.centroids_bdb.destroy() - print('destroying cluster binary DB') - self.clstr_bdb.destroy() - del self.gsl_ctx - s = gdl.gdl_exit() - if s: - raise Exception('gdl.gdl_exit failed with {}'.format(s)) diff --git a/benchmark/algorithms/httpann.py b/benchmark/algorithms/httpann.py deleted file mode 100644 index 36cbccfb5..000000000 --- a/benchmark/algorithms/httpann.py +++ /dev/null @@ -1,125 +0,0 @@ -import shlex -import sys -import time -from subprocess import Popen -from threading import Thread - -import numpy as np -import requests - -from benchmark.algorithms.base import BaseANN - - -class HttpANN(BaseANN): - """ - HTTP-based ANN algorithm. - Designed to enable language-agnostic ANN by delegating indexing and querying to a separate HTTP server. - - The HTTP server must satisfy the following API. - - | Method | Route | Request Body | Expected Status | Response Body | - | ------ | -------------------- | ---------------------------------------------------------------------------------------------------------- | --------------- | -------------------------------------------------------------------------- | - | POST | /init | dictionary of constructor arguments, e.g., {"metric": "euclidean", "dimension": 99 } | 200 | { } | - | POST | /load_index | { "dataset": } | 200 | { "load_index": } | - | POST | /fit | { "dataset": } | 200 | { } | - | POST | /set_query_arguments | dictionary of query arguments | 200 | { } | - | POST | /query | { "X": , "k": } | 200 | { } | - | POST | /range_query | { "X": , “radius”: } | 200 | { } | - | POST | /get_results | { } | 200 | { "get_results": } | - | POST | /get_additional | { } | 200 | { "get_additional": } | - | POST | /get_range_results | { } | 200 | { "get_range_results": } | - - Note that this is a 1:1 copy of the BaseANN Python Class API implemented as remote procedure calls. - """ - - def __init__(self, server_url: str, start_seconds: int, name: str, **kwargs): - """ - Base constructor for an HttpANN algorithm. - @param server_url: base URL for the server including port, e.g., "http:localhost:8080" - @param start_seconds: how many seconds to wait for the server to start before posting to the /init endpoint. - @param name: algorithm name - @param kwargs: any additional keyword arguments that will be passed through to the /init endpoint. - """ - self.server_url = server_url - self.name = name - - # Used by get_results method, defined in query method. - self.res = [] - - # Let the server start and post to init. 
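The route table above is easiest to read with a concrete exchange in mind. A minimal sketch of a client driving the documented /init, /fit, /query and /get_results routes with the requests library; the URL, dimension, dataset name and payload values are made up for illustration:

    import requests

    base = "http://localhost:8080"        # example server_url

    # POST /init with the constructor arguments, as in the table above
    requests.post(f"{base}/init", json={"metric": "euclidean", "dimension": 96}).raise_for_status()

    # POST /fit with a dataset name, then /query with a batch of vectors and k
    requests.post(f"{base}/fit", json={"dataset": "some-dataset"}).raise_for_status()
    requests.post(f"{base}/query", json={"X": [[0.0] * 96], "k": 10}).raise_for_status()

    # neighbour ids come back from /get_results
    ids = requests.post(f"{base}/get_results", json={}).json()["get_results"]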
- time.sleep(start_seconds) - self.post("init", kwargs, 200) - - def post(self, path: str, body: dict, expected_status: int) -> dict: - url = f"{self.server_url}/{path}" - res = requests.post(url, json=body) - if res.status_code != expected_status: - raise HttpANNResponseError(url, expected_status, res.status_code) - return res.json() - - def fit(self, dataset): - body = dict(dataset=dataset) - self.post("fit", body, 200) - - def load_index(self, dataset): - body = dict(dataset=dataset) - json = self.post("load_index", body, 200) - return json["load_index"] - - def query(self, X, k): - body = dict(X=[arr.tolist() for arr in X], k=k) - self.post("query", body, 200) - - def range_query(self, X, radius): - body = dict(X=[arr.tolist() for arr in X], radius=radius) - self.post("range_query", body, 200) - - def get_results(self): - json = self.post("get_results", dict(), 200) - return np.array(json["get_results"]) - - def get_range_results(self): - json = self.post("get_range_results", dict(), 200) - [lims, I, D] = json["get_range_results"] - return np.array(lims, 'int32'), np.array(I, 'int32'), np.array(D, 'float32') - - def get_additional(self): - json = self.post("get_additional", dict(), 200) - return json["get_additional"] - - def set_query_arguments(self, *query_args): - body = dict(query_args=query_args) - self.post("set_query_arguments", body, 200) - - -class HttpANNError(RuntimeError): - """Custom error type""" - pass - - -class HttpANNResponseError(HttpANNError): - """Custom error type""" - - def __init__(self, endpoint: str, expected_status: int, actual_status: int): - super(HttpANNError, self).__init__(f"Endpoint {endpoint} expected {expected_status} but got {actual_status}") - - -class HttpANNSubprocess(object): - """ - Helper class to start the HTTP server as a local subprocess. - Starts a background thread to monitor the subprocess by checking for an exit code once per second. - If the background thread finds an exit code, it will raise an HttpANNError. - """ - - def __init__(self, server_subprocess_command: str): - proc = Popen(shlex.split(server_subprocess_command), stdout=sys.stdout, stderr=sys.stderr) - - def monitor(): - while True: - time.sleep(1) - poll = proc.poll() - if poll is not None: - raise HttpANNError(f"HTTP server subprocess prematurely returned status code {poll}.") - - t = Thread(target=monitor, args=(), daemon=True) - t.start() diff --git a/benchmark/algorithms/httpann_example.py b/benchmark/algorithms/httpann_example.py deleted file mode 100644 index a6b876be0..000000000 --- a/benchmark/algorithms/httpann_example.py +++ /dev/null @@ -1,154 +0,0 @@ -import numpy as np -from flask import Flask, request, jsonify -from sklearn.neighbors import NearestNeighbors - -from benchmark.datasets import DATASETS -from .httpann import HttpANN, HttpANNSubprocess - - -class HttpANNExampleAlgorithm(HttpANN, HttpANNSubprocess): - """ - ANN algorithm that serves as a standard "algorithm" (callable from runner.py) and manages an HTTP server that - implements the actual indexing and query processing algorithms. - - By implementing HttpANNSubprocess, it starts a local server (which is implemented further below in the same file). - By implementing HttpANN, it can be used by runner.py to make ANN requests from Python to the local server. - - Obviously this is a contrived setup, as the actual algorithm is also implemented in Python. 
- It's purely as an example of how one might run an algorithm from another language by using an HTTP server to - implement the server API expected by HttpANN. - """ - - def __init__(self, metric: str, dimension: int, use_dims: float): - HttpANNSubprocess.__init__(self, "python3 -m benchmark.algorithms.httpann_example example") - HttpANN.__init__(self, server_url="http://localhost:8080", start_seconds=3, - name=f"http-ann-example-{metric}-{use_dims}", metric=metric, dimension=dimension, - use_dims=use_dims) - - -# Starts a local flask server that adheres to the HttpANN API and delegates the work to a local ANN algorithm. -def main(): - class SimpleANNAlgo(object): - """ - Very simple ANN algorithm intended only to demonstrate the HttpANN functionality. - This algorithm is instantiated and called from the example server below. - The algorithm is approximate in the sense that it uses exact KNN constrained to a configurable subset of the - highest variance dimensions. For example, if dimensions=100 and use_dims=0.22, the algorithm picks the 22 - dimensions with the highest variance and use them for exact KNN. - """ - - def __init__(self, metric: str, dimension: int, use_dims: float = 0.1): - self.metric = metric - self.dimension = dimension - self.use_dims = use_dims - self.knn = None - self.high_variance_dims = None - self.res = None - - def fit(self, dataset): - ds = DATASETS[dataset]() - arr = ds.get_dataset() - var = arr.var(axis=0) - num_dims = int(self.use_dims * arr.shape[1]) - self.high_variance_dims = np.argsort(var)[-num_dims:] - self.knn = NearestNeighbors(algorithm='brute', metric=self.metric) - self.knn.fit(arr[:, self.high_variance_dims]) - - def load_index(self, dataset): - # Always returns false because the index is not stored. - return False - - def query(self, X, k): - self.res = self.knn.kneighbors(X[:, self.high_variance_dims], n_neighbors=k, return_distance=False) - - def range_query(self, X, radius): - nbrs, dsts = self.knn.radius_neighbors(X[:, self.high_variance_dims], radius=radius, return_distance=True) - total = sum(map(len, nbrs)) - lims = np.zeros(len(X) + 1, 'int32') - I = np.zeros(total, 'int32') - D = np.zeros(total, 'float32') - for i in range(len(X)): - lims[i + 1] = lims[i] + len(nbrs[i]) - I[lims[i]:lims[i + 1]] = nbrs[i] - D[lims[i]:lims[i + 1]] = dsts[i] - self.res = (lims, I, D) - - def get_results(self): - return self.res - - def get_range_results(self): - return self.res - - def get_additional(self): - return {} - - app = Flask(__name__) - - # Algorithm is instantiated later but needs to be attached to an object. 
- app.algo = None - - @app.route("/status", methods=['GET']) - def status(): - return jsonify(dict()), 200 - - @app.route("/init", methods=['POST']) - def init(): - app.algo = SimpleANNAlgo(**request.json) - return jsonify(dict()), 200 - - @app.route("/load_index", methods=['POST']) - def load_index(): - b = app.algo.load_index(**request.json) - return jsonify(dict(load_index=b)), 200 - - @app.route("/fit", methods=['POST']) - def fit(): - app.algo.fit(**request.json) - return jsonify(dict()), 200 - - @app.route("/set_query_arguments", methods=['POST']) - def set_query_arguments(): - app.algo.set_query_arguments(**request.json) - return jsonify(dict()), 200 - - @app.route("/query", methods=['POST']) - def query(): - j = request.json - app.algo.query(np.array(j['X']), j['k']) - return jsonify(dict()), 200 - - @app.route("/range_query", methods=['POST']) - def range_query(): - j = request.json - app.algo.range_query(np.array(j['X']), j['radius']) - return jsonify(dict()), 200 - - @app.route("/get_results", methods=['POST']) - def get_results(): - neighbors = [arr.tolist() for arr in app.algo.res] - return jsonify(dict(get_results=neighbors)), 200 - - @app.route("/get_range_results", methods=['POST']) - def get_range_results(): - lims, I, D = app.algo.get_range_results() - res = [ - lims.tolist(), - [arr.tolist() for arr in I], - [arr.tolist() for arr in D] - ] - return jsonify(dict(get_range_results=res)), 200 - - @app.route("/get_additional", methods=['POST']) - def get_additional(): - return jsonify(dict(get_additional=app.algo.get_additional())), 200 - - app.run('0.0.0.0', 8080, debug=False) - # We could also use gevent/wsgi for a more professional setup. - # https://flask.palletsprojects.com/en/2.0.x/deploying/wsgi-standalone/#gevent - # from gevent.pywsgi import WSGIServer - # http_server = WSGIServer(('0.0.0.0', 8080), app) - # http_server.serve_forever() - - -if __name__ == "__main__": - main() diff --git a/benchmark/datasets.py b/benchmark/datasets.py deleted file mode 100644 index 368e646b2..000000000 --- a/benchmark/datasets.py +++ /dev/null @@ -1,737 +0,0 @@ -import math -import numpy -import os -import random -import sys -import struct -import time - -import numpy as np - -from urllib.request import urlopen -from urllib.request import urlretrieve - -BASEDIR = "data/" - -def download(src, dst=None, max_size=None): - """ download an URL, possibly cropped """ - if os.path.exists(dst): - return - print('downloading %s -> %s...' % (src, dst)) - if max_size is not None: - print(" stopping at %d bytes" % max_size) - t0 = time.time() - outf = open(dst, "wb") - inf = urlopen(src) - info = dict(inf.info()) - content_size = int(info['Content-Length']) - bs = 1 << 20 - totsz = 0 - while True: - block = inf.read(bs) - elapsed = time.time() - t0 - print( - " [%.2f s] downloaded %.2f MiB / %.2f MiB at %.2f MiB/s " % ( - elapsed, - totsz / 2**20, content_size / 2**20, - totsz / 2**20 / elapsed), - flush=True, end="\r" - ) - if not block: - break - if max_size is not None and totsz + len(block) >= max_size: - block = block[:max_size - totsz] - outf.write(block) - totsz += len(block) - break - outf.write(block) - totsz += len(block) - print() - print("download finished in %.2f s, total size %d bytes" % ( - time.time() - t0, totsz - )) - - -def download_accelerated(src, dst, quiet=False, sas_string=""): - """ dowload using an accelerator. Make sure the executable is in the path """ - print('downloading %s -> %s...' 
% (src, dst)) - if "windows.net" in src: - if sas_string == "": - cmd = f"azcopy copy {src} {dst}" - else: - cmd = f"azcopy copy '{src}?{sas_string}' '{dst}'" - else: - cmd = f"axel --alternate -n 10 {src} -o {dst}" - if quiet: - cmd += " -q" - - print("running", cmd) - ret = os.system(cmd) - assert ret == 0 - -def upload_accelerated(local_dir, blob_prefix, component, sas_string, quiet=False): - """ Upload index component to Azure blob using SAS string""" - src = os.path.join(local_dir, component) - dst = blob_prefix + '/' + component + '?' + sas_string - print('Uploading %s -> %s...' % (src, dst)) - - cmd = f"azcopy copy '{src}' '{dst}'" - print("running", cmd) - ret = os.system(cmd) - assert ret == 0 - - -def bvecs_mmap(fname): - x = numpy.memmap(fname, dtype='uint8', mode='r') - d = x[:4].view('int32')[0] - return x.reshape(-1, d + 4)[:, 4:] - -def ivecs_read(fname): - a = numpy.fromfile(fname, dtype='int32') - d = a[0] - return a.reshape(-1, d + 1)[:, 1:].copy() - -def xbin_mmap(fname, dtype, maxn=-1): - """ mmap the competition file format for a given type of items """ - n, d = map(int, np.fromfile(fname, dtype="uint32", count=2)) - - # HACK - to handle improper header in file for private deep-1B - # if override_d and override_d != d: - # print("Warning: xbin_mmap map returned d=%s, but overridig with %d" % (d, override_d)) - # d = override_d - # HACK - - assert os.stat(fname).st_size == 8 + n * d * np.dtype(dtype).itemsize - if maxn > 0: - n = min(n, maxn) - return np.memmap(fname, dtype=dtype, mode="r", offset=8, shape=(n, d)) - -def range_result_read(fname): - """ read the range search result file format """ - f = open(fname, "rb") - nq, total_res = np.fromfile(f, count=2, dtype="int32") - nres = np.fromfile(f, count=nq, dtype="int32") - assert nres.sum() == total_res - I = np.fromfile(f, count=total_res, dtype="int32") - D = np.fromfile(f, count=total_res, dtype="float32") - return nres, I, D - -def knn_result_read(fname): - n, d = map(int, np.fromfile(fname, dtype="uint32", count=2)) - assert os.stat(fname).st_size == 8 + n * d * (4 + 4) - f = open(fname, "rb") - f.seek(4+4) - I = np.fromfile(f, dtype="int32", count=n * d).reshape(n, d) - D = np.fromfile(f, dtype="float32", count=n * d).reshape(n, d) - return I, D - -def read_fbin(filename, start_idx=0, chunk_size=None): - """ Read *.fbin file that contains float32 vectors - Args: - :param filename (str): path to *.fbin file - :param start_idx (int): start reading vectors from this index - :param chunk_size (int): number of vectors to read. - If None, read all vectors - Returns: - Array of float32 vectors (numpy.ndarray) - """ - with open(filename, "rb") as f: - nvecs, dim = np.fromfile(f, count=2, dtype=np.int32) - nvecs = (nvecs - start_idx) if chunk_size is None else chunk_size - arr = np.fromfile(f, count=nvecs * dim, dtype=np.float32, - offset=start_idx * 4 * dim) - return arr.reshape(nvecs, dim) - - -def read_ibin(filename, start_idx=0, chunk_size=None): - """ Read *.ibin file that contains int32 vectors - Args: - :param filename (str): path to *.ibin file - :param start_idx (int): start reading vectors from this index - :param chunk_size (int): number of vectors to read. 
- If None, read all vectors - Returns: - Array of int32 vectors (numpy.ndarray) - """ - with open(filename, "rb") as f: - nvecs, dim = np.fromfile(f, count=2, dtype=np.int32) - nvecs = (nvecs - start_idx) if chunk_size is None else chunk_size - arr = np.fromfile(f, count=nvecs * dim, dtype=np.int32, - offset=start_idx * 4 * dim) - return arr.reshape(nvecs, dim) - - -def sanitize(x): - return numpy.ascontiguousarray(x, dtype='float32') - - -class Dataset(): - def prepare(self): - """ - Download and prepare dataset, queries, groundtruth. - """ - pass - def get_dataset_fn(self): - """ - Return filename of dataset file. - """ - pass - def get_dataset(self): - """ - Return memmapped version of the dataset. - """ - pass - def get_dataset_iterator(self, bs=512, split=(1, 0)): - """ - Return iterator over blocks of dataset of size at most 512. - The split argument takes a pair of integers (n, p) where p = 0..n-1 - The dataset is split in n shards, and the iterator returns only shard #p - This makes it possible to process the dataset independently from several - processes / threads. - """ - pass - def get_queries(self): - """ - Return (nq, d) array containing the nq queries. - """ - pass - def get_private_queries(self): - """ - Return (private_nq, d) array containing the private_nq private queries. - """ - pass - def get_groundtruth(self, k=None): - """ - Return (nq, k) array containing groundtruth indices - for each query.""" - pass - - def search_type(self): - """ - "knn" or "range" - """ - pass - - def distance(self): - """ - "euclidean" or "ip" or "angular" - """ - pass - - def default_count(self): - return 10 - - def short_name(self): - return f"{self.__class__.__name__}-{self.nb}" - - def __str__(self): - return ( - f"Dataset {self.__class__.__name__} in dimension {self.d}, with distance {self.distance()}, " - f"search_type {self.search_type()}, size: Q {self.nq} B {self.nb}") - - -############################################################################# -# Datasets for the competition -############################################################################## - - - -class DatasetCompetitionFormat(Dataset): - """ - Dataset in the native competition format, that is able to read the - files in the https://big-ann-benchmarks.com/ page. - The constructor should set all fields. The functions below are generic. - - For the 10M versions of the dataset, the database files are downloaded in - part and stored with a specific suffix. This is to avoid having to maintain - two versions of the file. - """ - - def prepare(self, skip_data=False): - if not os.path.exists(self.basedir): - os.makedirs(self.basedir) - - # start with the small ones... 
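A side note on the (n, p) split convention documented for get_dataset_iterator above: shard p of n covers rows nb*p//n up to nb*(p+1)//n, so the shards tile the dataset contiguously with no gaps or overlap. A small illustration with an invented nb:

    nb, nsplit = 10_000, 3
    bounds = [(nb * p // nsplit, nb * (p + 1) // nsplit) for p in range(nsplit)]
    print(bounds)   # [(0, 3333), (3333, 6666), (6666, 10000)]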
- for fn in [self.qs_fn, self.gt_fn]: - if fn is None: - continue - if fn.startswith("https://"): - sourceurl = fn - outfile = os.path.join(self.basedir, fn.split("/")[-1]) - else: - sourceurl = os.path.join(self.base_url, fn) - outfile = os.path.join(self.basedir, fn) - if os.path.exists(outfile): - print("file %s already exists" % outfile) - continue - download(sourceurl, outfile) - - # private qs url - if self.private_qs_url: - outfile = os.path.join(self.basedir, self.private_qs_url.split("/")[-1]) - if os.path.exists(outfile): - print("file %s already exists" % outfile) - else: - download(self.private_qs_url, outfile) - - # private gt url - if self.private_gt_url: - outfile = os.path.join(self.basedir, self.private_gt_url.split("/")[-1]) - if os.path.exists(outfile): - print("file %s already exists" % outfile) - else: - download(self.private_gt_url, outfile) - - if skip_data: - return - - fn = self.ds_fn - sourceurl = os.path.join(self.base_url, fn) - outfile = os.path.join(self.basedir, fn) - if os.path.exists(outfile): - print("file %s already exists" % outfile) - return - if self.nb == 10**9: - download_accelerated(sourceurl, outfile) - else: - # download cropped version of file - file_size = 8 + self.d * self.nb * np.dtype(self.dtype).itemsize - outfile = outfile + '.crop_nb_%d' % self.nb - if os.path.exists(outfile): - print("file %s already exists" % outfile) - return - download(sourceurl, outfile, max_size=file_size) - # then overwrite the header... - header = np.memmap(outfile, shape=2, dtype='uint32', mode="r+") - assert header[0] == 10**9 - assert header[1] == self.d - header[0] = self.nb - - def get_dataset_fn(self): - fn = os.path.join(self.basedir, self.ds_fn) - if os.path.exists(fn): - return fn - if self.nb != 10**9: - fn += '.crop_nb_%d' % self.nb - return fn - else: - raise RuntimeError("file not found") - - def get_dataset_iterator(self, bs=512, split=(1,0)): - nsplit, rank = split - i0, i1 = self.nb * rank // nsplit, self.nb * (rank + 1) // nsplit - filename = self.get_dataset_fn() - x = xbin_mmap(filename, dtype=self.dtype, maxn=self.nb) - assert x.shape == (self.nb, self.d) - for j0 in range(i0, i1, bs): - j1 = min(j0 + bs, i1) - yield sanitize(x[j0:j1]) - - def search_type(self): - return "knn" - - def get_groundtruth(self, k=None): - assert self.gt_fn is not None - fn = self.gt_fn.split("/")[-1] # in case it's a URL - assert self.search_type() == "knn" - - I, D = knn_result_read(os.path.join(self.basedir, fn)) - assert I.shape[0] == self.nq - if k is not None: - assert k <= 100 - I = I[:, :k] - D = D[:, :k] - return I, D - - def get_dataset(self): - assert self.nb <= 10**7, "dataset too large, use iterator" - return sanitize(next(self.get_dataset_iterator(bs=self.nb))) - - def get_queries(self): - filename = os.path.join(self.basedir, self.qs_fn) - x = xbin_mmap(filename, dtype=self.dtype) - assert x.shape == (self.nq, self.d) - return sanitize(x) - - def get_private_queries(self): - assert self.private_qs_url is not None - fn = self.private_qs_url.split("/")[-1] # in case it's a URL - filename = os.path.join(self.basedir, fn) - x = xbin_mmap(filename, dtype=self.dtype) - assert x.shape == (self.private_nq, self.d) - return sanitize(x) - - def get_private_groundtruth(self, k=None): - assert self.private_gt_url is not None - fn = self.private_gt_url.split("/")[-1] # in case it's a URL - assert self.search_type() == "knn" - - I, D = knn_result_read(os.path.join(self.basedir, fn)) - assert I.shape[0] == self.private_nq - if k is not None: - assert k <= 100 - I = 
I[:, :k] - D = D[:, :k] - return I, D - -subset_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/" - -class SSNPPDataset(DatasetCompetitionFormat): - def __init__(self, nb_M=1000): - # assert nb_M in (10, 1000) - self.nb_M = nb_M - self.nb = 10**6 * nb_M - self.d = 256 - self.nq = 100000 - self.dtype = "uint8" - self.ds_fn = "FB_ssnpp_database.u8bin" - self.qs_fn = "FB_ssnpp_public_queries.u8bin" - self.gt_fn = ( - "FB_ssnpp_public_queries_1B_GT.rangeres" if self.nb_M == 1000 else - subset_url + "GT_100M/ssnpp-100M" if self.nb_M == 100 else - subset_url + "GT_10M/ssnpp-10M" if self.nb_M == 10 else - None - ) - - self.base_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/" - self.basedir = os.path.join(BASEDIR, "FB_ssnpp") - - self.private_nq = 100000 - self.private_qs_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/FB_ssnpp_heldout_queries_3307fba121460a56.u8bin" - self.private_gt_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/GT_1B_final_2bf4748c7817/FB_ssnpp.bin" - - def search_type(self): - return "range" - - def default_count(self): - return 96237 - - def distance(self): - return "euclidean" - - def get_groundtruth(self, k=None): - """ override the ground-truth function as this is the only range search dataset """ - assert self.gt_fn is not None - fn = self.gt_fn.split("/")[-1] # in case it's a URL - return range_result_read(os.path.join(self.basedir, fn)) - - def get_private_groundtruth(self, k=None): - """ override the ground-truth function as this is the only range search dataset """ - assert self.private_gt_url is not None - fn = self.private_gt_url.split("/")[-1] # in case it's a URL - return range_result_read(os.path.join(self.basedir, fn)) - -class BigANNDataset(DatasetCompetitionFormat): - def __init__(self, nb_M=1000): - self.nb_M = nb_M - self.nb = 10**6 * nb_M - self.d = 128 - self.nq = 10000 - self.dtype = "uint8" - self.ds_fn = "base.1B.u8bin" - self.qs_fn = "query.public.10K.u8bin" - self.gt_fn = ( - "GT.public.1B.ibin" if self.nb_M == 1000 else - subset_url + "GT_100M/bigann-100M" if self.nb_M == 100 else - subset_url + "GT_10M/bigann-10M" if self.nb_M == 10 else - None - ) - # self.gt_fn = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/bigann/public_query_gt100.bin" if self.nb == 10**9 else None - self.base_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/bigann/" - self.basedir = os.path.join(BASEDIR, "bigann") - - self.private_nq = 10000 - self.private_qs_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/bigann/query.private.799253207.10K.u8bin" - self.private_gt_url = "https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/GT_1B_final_2bf4748c7817/bigann-1B.bin" - - - def distance(self): - return "euclidean" - -class Deep1BDataset(DatasetCompetitionFormat): - def __init__(self, nb_M=1000): - self.nb_M = nb_M - self.nb = 10**6 * nb_M - self.d = 96 - self.nq = 10000 - self.dtype = "float32" - self.ds_fn = "base.1B.fbin" - self.qs_fn = "query.public.10K.fbin" - self.gt_fn = ( - "https://storage.yandexcloud.net/yandex-research/ann-datasets/deep_new_groundtruth.public.10K.bin" if self.nb_M == 1000 else - subset_url + "GT_100M/deep-100M" if self.nb_M == 100 else - subset_url + "GT_10M/deep-10M" if self.nb_M == 10 else - None - ) - self.base_url = "https://storage.yandexcloud.net/yandex-research/ann-datasets/DEEP/" - self.basedir = os.path.join(BASEDIR, "deep1b") - - self.private_nq = 30000 - self.private_qs_url = 
"https://comp21storage.blob.core.windows.net/publiccontainer/comp21/deep1b/query.heldout.30K.fbin" - self.private_gt_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/deep1b/gt100-heldout.30K.fbin" - - self.private_nq_large = 1000000 - self.private_qs_large_url = "https://storage.yandexcloud.net/yr-secret-share/ann-datasets-5ac0659e27/DEEP/query.private.1M.fbin" - - def distance(self): - return "euclidean" - - - -class Text2Image1B(DatasetCompetitionFormat): - def __init__(self, nb_M=1000): - self.nb_M = nb_M - self.nb = 10**6 * nb_M - self.d = 200 - self.nq = 100000 - self.dtype = "float32" - self.ds_fn = "base.1B.fbin" - self.qs_fn = "query.public.100K.fbin" - self.gt_fn = ( - "https://storage.yandexcloud.net/yandex-research/ann-datasets/t2i_new_groundtruth.public.100K.bin" if self.nb_M == 1000 else - subset_url + "GT_100M/text2image-100M" if self.nb_M == 100 else - subset_url + "GT_10M/text2image-10M" if self.nb_M == 10 else - None - ) - self.base_url = "https://storage.yandexcloud.net/yandex-research/ann-datasets/T2I/" - self.basedir = os.path.join(BASEDIR, "text2image1B") - - self.private_nq = 30000 - self.private_qs_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/text2image1b/query.heldout.30K.fbin" - self.private_gt_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/text2image1b/gt100-heldout.30K.fbin" - - self.private_nq_large = 1000000 - self.private_qs_large_url = "https://storage.yandexcloud.net/yr-secret-share/ann-datasets-5ac0659e27/T2I/query.private.1M.fbin" - - def distance(self): - return "ip" - - def get_query_train(self, maxn=10**6): - xq_train = np.memmap( - BASEDIR + "/text2image1B/query.learn.50M.fbin", offset=8, - dtype='float32', shape=(maxn, 200), mode='r') - return np.array(xq_train) - -class MSTuringANNS(DatasetCompetitionFormat): - def __init__(self, nb_M=1000): - self.nb_M = nb_M - self.nb = 10**6 * nb_M - self.d = 100 - self.nq = 100000 - self.dtype = "float32" - self.ds_fn = "base1b.fbin" - self.qs_fn = "query100K.fbin" - self.gt_fn = ( - "query_gt100.bin" if self.nb_M == 1000 else - subset_url + "GT_100M/msturing-100M" if self.nb_M == 100 else - subset_url + "GT_10M/msturing-10M" if self.nb_M == 10 else - None - ) - self.base_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/MSFT-TURING-ANNS/" - self.basedir = os.path.join(BASEDIR, "MSTuringANNS") - - self.private_nq = 10000 - self.private_qs_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/MSFT-TURING-ANNS/testQuery10K.fbin" - self.private_gt_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/MSFT-TURING-ANNS/gt100-private10K-queries.bin" - - self.private_nq_large = 99605 - self.private_qs_large_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/MSFT-TURING-ANNS/testQuery99605.fbin" - self.private_gt_large_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/MSFT-TURING-ANNS/gt100-private99605-queries.bin" - - def distance(self): - return "euclidean" - - -class MSSPACEV1B(DatasetCompetitionFormat): - def __init__(self, nb_M=1000): - self.nb_M = nb_M - self.nb = 10**6 * nb_M - self.d = 100 - self.nq = 29316 - self.dtype = "int8" - self.ds_fn = "spacev1b_base.i8bin" - self.qs_fn = "query.i8bin" - self.gt_fn = ( - "public_query_gt100.bin" if self.nb_M == 1000 else - subset_url + "GT_100M/msspacev-100M" if self.nb_M == 100 else - subset_url + "GT_10M/msspacev-10M" if self.nb_M == 10 else - None - ) - self.base_url = 
"https://comp21storage.blob.core.windows.net/publiccontainer/comp21/spacev1b/" - self.basedir = os.path.join(BASEDIR, "MSSPACEV1B") - - self.private_nq = 30000 - self.private_qs_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/spacev1b/private_query_30k.bin" - self.private_gt_url = "https://comp21storage.blob.core.windows.net/publiccontainer/comp21/spacev1b/gt100_private_query_30k.bin" - - def distance(self): - return "euclidean" - -class RandomRangeDS(DatasetCompetitionFormat): - def __init__(self, nb, nq, d): - self.nb = nb - self.nq = nq - self.d = d - self.dtype = 'float32' - self.ds_fn = f"data_{self.nb}_{self.d}" - self.qs_fn = f"queries_{self.nq}_{self.d}" - self.gt_fn = f"gt_{self.nb}_{self.nq}_{self.d}" - self.basedir = os.path.join(BASEDIR, f"random{self.nb}") - if not os.path.exists(self.basedir): - os.makedirs(self.basedir) - - def prepare(self, skip_data=False): - import sklearn.datasets - import sklearn.model_selection - from sklearn.neighbors import NearestNeighbors - - print(f"Preparing datasets with {self.nb} random points and {self.nq} queries.") - - - X, _ = sklearn.datasets.make_blobs( - n_samples=self.nb + self.nq, n_features=self.d, - centers=self.nq, random_state=1) - - data, queries = sklearn.model_selection.train_test_split( - X, test_size=self.nq, random_state=1) - - - with open(os.path.join(self.basedir, self.ds_fn), "wb") as f: - np.array([self.nb, self.d], dtype='uint32').tofile(f) - data.astype('float32').tofile(f) - with open(os.path.join(self.basedir, self.qs_fn), "wb") as f: - np.array([self.nq, self.d], dtype='uint32').tofile(f) - queries.astype('float32').tofile(f) - - print("Computing groundtruth") - - nbrs = NearestNeighbors(n_neighbors=100, metric="euclidean", algorithm='brute').fit(data) - D, I = nbrs.kneighbors(queries) - - nres = np.count_nonzero((D < math.sqrt(self.default_count())) == True, axis=1) - DD = np.zeros(nres.sum()) - II = np.zeros(nres.sum(), dtype='int32') - - s = 0 - for i, l in enumerate(nres): - DD[s : s + l] = D[i, 0 : l] - II[s : s + l] = I[i, 0 : l] - s += l - - with open(os.path.join(self.basedir, self.gt_fn), "wb") as f: - np.array([self.nq, nres.sum()], dtype='uint32').tofile(f) - nres.astype('int32').tofile(f) - II.astype('int32').tofile(f) - DD.astype('float32').tofile(f) - - def get_groundtruth(self, k=None): - """ override the ground-truth function as this is the only range search dataset """ - assert self.gt_fn is not None - fn = self.gt_fn.split("/")[-1] # in case it's a URL - return range_result_read(os.path.join(self.basedir, fn)) - - def search_type(self): - return "range" - - def default_count(self): - return 49 - - def distance(self): - return "euclidean" - - def __str__(self): - return f"RandomRange({self.nb})" - -class RandomDS(DatasetCompetitionFormat): - def __init__(self, nb, nq, d): - self.nb = nb - self.nq = nq - self.d = d - self.dtype = 'float32' - self.ds_fn = f"data_{self.nb}_{self.d}" - self.qs_fn = f"queries_{self.nq}_{self.d}" - self.gt_fn = f"gt_{self.nb}_{self.nq}_{self.d}" - self.basedir = os.path.join(BASEDIR, f"random{self.nb}") - if not os.path.exists(self.basedir): - os.makedirs(self.basedir) - - def prepare(self, skip_data=False): - import sklearn.datasets - import sklearn.model_selection - from sklearn.neighbors import NearestNeighbors - - print(f"Preparing datasets with {self.nb} random points and {self.nq} queries.") - - - X, _ = sklearn.datasets.make_blobs( - n_samples=self.nb + self.nq, n_features=self.d, - centers=self.nq, random_state=1) - - data, queries = 
sklearn.model_selection.train_test_split( - X, test_size=self.nq, random_state=1) - - - with open(os.path.join(self.basedir, self.ds_fn), "wb") as f: - np.array([self.nb, self.d], dtype='uint32').tofile(f) - data.astype('float32').tofile(f) - with open(os.path.join(self.basedir, self.qs_fn), "wb") as f: - np.array([self.nq, self.d], dtype='uint32').tofile(f) - queries.astype('float32').tofile(f) - - print("Computing groundtruth") - - nbrs = NearestNeighbors(n_neighbors=100, metric="euclidean", algorithm='brute').fit(data) - D, I = nbrs.kneighbors(queries) - with open(os.path.join(self.basedir, self.gt_fn), "wb") as f: - np.array([self.nq, 100], dtype='uint32').tofile(f) - I.astype('uint32').tofile(f) - D.astype('float32').tofile(f) - - def search_type(self): - return "knn" - - def distance(self): - return "euclidean" - - def __str__(self): - return f"Random({self.nb})" - - def default_count(self): - return 10 - - -DATASETS = { - 'bigann-1B': lambda : BigANNDataset(1000), - 'bigann-100M': lambda : BigANNDataset(100), - 'bigann-10M': lambda : BigANNDataset(10), - - 'deep-1B': lambda : Deep1BDataset(), - 'deep-100M': lambda : Deep1BDataset(100), - 'deep-10M': lambda : Deep1BDataset(10), - - 'ssnpp-1B': lambda : SSNPPDataset(1000), - 'ssnpp-10M': lambda : SSNPPDataset(10), - 'ssnpp-100M': lambda : SSNPPDataset(100), - 'ssnpp-1M': lambda : SSNPPDataset(1), - - 'text2image-1B': lambda : Text2Image1B(), - 'text2image-1M': lambda : Text2Image1B(1), - 'text2image-10M': lambda : Text2Image1B(10), - 'text2image-100M': lambda : Text2Image1B(100), - - 'msturing-1B': lambda : MSTuringANNS(1000), - 'msturing-1M': lambda : MSTuringANNS(1), - 'msturing-10M': lambda : MSTuringANNS(10), - 'msturing-100M': lambda : MSTuringANNS(100), - - 'msspacev-1B': lambda : MSSPACEV1B(1000), - 'msspacev-10M': lambda : MSSPACEV1B(10), - 'msspacev-100M': lambda : MSSPACEV1B(100), - 'msspacev-1M': lambda : MSSPACEV1B(1), - - 'random-xs': lambda : RandomDS(10000, 1000, 20), - 'random-s': lambda : RandomDS(100000, 1000, 50), - - 'random-range-xs': lambda : RandomRangeDS(10000, 1000, 20), - 'random-range-s': lambda : RandomRangeDS(100000, 1000, 50), -} diff --git a/benchmark/distances.py b/benchmark/distances.py deleted file mode 100644 index 7a013697c..000000000 --- a/benchmark/distances.py +++ /dev/null @@ -1,16 +0,0 @@ -from scipy.spatial.distance import pdist as scipy_pdist -import itertools -import numpy as np - -def pdist(a, b, metric): - return scipy_pdist([a, b], metric=metric)[0] - -metrics = { - 'euclidean': { - 'distance': lambda a, b: pdist(a, b, "euclidean"), - }, - 'angular': { - 'distance': lambda a, b: pdist(a, b, "cosine"), - } -} - diff --git a/benchmark/main.py b/benchmark/main.py deleted file mode 100644 index 5f0a63059..000000000 --- a/benchmark/main.py +++ /dev/null @@ -1,250 +0,0 @@ -from __future__ import absolute_import -import argparse -import logging -import logging.config - -import docker -import multiprocessing.pool -import os -import psutil -import random -import shutil -import sys -import traceback - -from benchmark.datasets import DATASETS -from benchmark.algorithms.definitions import (get_definitions, - list_algorithms, - algorithm_status, - InstantiationStatus) -from benchmark.results import get_result_filename -from benchmark.runner import run, run_docker, run_no_docker - -from benchmark.sensors.power_capture import power_capture - -def positive_int(s): - i = None - try: - i = int(s) - except ValueError: - pass - if not i or i < 1: - raise argparse.ArgumentTypeError("%r is not a positive 
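The `DATASETS` registry above maps each dataset name to a zero-argument factory; `create_dataset.py` and `main.py` (both later in this diff) instantiate datasets exactly this way. A minimal usage sketch:

```python
from benchmark.datasets import DATASETS

ds = DATASETS["random-xs"]()     # RandomDS(10000, 1000, 20)
ds.prepare()                     # generates base vectors, queries and groundtruth in ds.basedir
print(ds)                        # "Random(10000)"
queries = ds.get_queries()       # (1000, 20) float32 array
```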
integer" % s) - return i - - -def run_worker(args, queue): - print("RW", args) - while not queue.empty(): - definition = queue.get() - memory_margin = 500e6 # reserve some extra memory for misc stuff - mem_limit = int((psutil.virtual_memory().available - memory_margin)) - #mem_limit = 128e9 # 128gb for competition - cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1) - - if args.nodocker: - run_no_docker(definition, args.dataset, args.count, - args.runs, args.timeout, args.rebuild, cpu_limit, mem_limit, - args.t3, args.power_capture, - args.upload_index, args.download_index, - args.blob_prefix, args.sas_string, - args.private_query) - - else: - run_docker(definition, args.dataset, args.count, - args.runs, args.timeout, args.rebuild, cpu_limit, mem_limit, - args.t3, args.power_capture, - args.upload_index, args.download_index, - args.blob_prefix, args.sas_string, - args.private_query) - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( - '--dataset', - metavar='NAME', - help='the dataset to load training points from', - default='sift-1M', - choices=DATASETS.keys()) - parser.add_argument( - "-k", "--count", - default=-1, - type=int, - help="the number of near neighbours to search for") - parser.add_argument( - '--definitions', - metavar='FILE', - help='load algorithm definitions from FILE', - default='algos.yaml') - parser.add_argument( - '--algorithm', - metavar='NAME', - help='run only the named algorithm', - default=None) - parser.add_argument( - '--docker-tag', - metavar='NAME', - help='run only algorithms in a particular docker image', - default=None) - parser.add_argument( - '--list-algorithms', - help='print the names of all known algorithms and exit', - action='store_true') - parser.add_argument( - '--force', - help='re-run algorithms even if their results already exist', - action='store_true') - parser.add_argument( - '--rebuild', - help='re-build index even if it exists', - action='store_true') - parser.add_argument( - '--runs', - metavar='COUNT', - type=positive_int, - help='run each algorithm instance %(metavar)s times and use only' - ' the best result', - default=5) - parser.add_argument( - '--timeout', - type=int, - help='Timeout (in seconds) for each individual algorithm run, or -1' - 'if no timeout should be set', - default=12 * 3600) - parser.add_argument( - '--max-n-algorithms', - type=int, - help='Max number of algorithms to run (just used for testing)', - default=-1) - parser.add_argument( - '--power-capture', - help='Power capture parameters for the T3 competition. ' - 'Format is "ip:port:capture_time_in seconds" (ie, 127.0.0.1:3000:10).', - default="") - parser.add_argument( - '--t3', - help='Run as a T3 participant.', - action='store_true') - parser.add_argument( - '--nodocker', - help='Override default of invoking algorithm in docker container.', - action='store_true') - parser.add_argument( - '--upload-index', - help='Upload index to Azure blob storage and avoid local queries.', - action='store_true') - parser.add_argument( - '--download-index', - help='Download index uploaded to Azure blob storage and run local queries.', - action='store_true') - parser.add_argument( - '--blob-prefix', - help='Azure blob prefix to upload indices to and download indices from.' - ) - parser.add_argument( - '--sas-string', - help='SAS string to authenticate to Azure blob storage.' 
- ) - parser.add_argument( - '--private-query', - help='Use the new set of private queries that were not released during the competition period.', - action='store_true' - ) - - - args = parser.parse_args() - if args.timeout == -1: - args.timeout = None - - if args.list_algorithms: - list_algorithms(args.definitions) - sys.exit(0) - - if args.power_capture: - # validate power capture environment - power_capture( args.power_capture ) - power_capture.ping() - - logging.config.fileConfig("logging.conf") - logger = logging.getLogger("annb") - - dataset = DATASETS[args.dataset]() - dataset.prepare(True) # prepare dataset, but skip potentially huge base vectors - dimension = dataset.d - point_type = 'float' - distance = dataset.distance() - if args.count == -1: - args.count = dataset.default_count() - definitions = get_definitions( - args.definitions, dimension, args.dataset, distance, args.count) - - # Filter out, from the loaded definitions, all those query argument groups - # that correspond to experiments that have already been run. (This might - # mean removing a definition altogether, so we can't just use a list - # comprehension.) - filtered_definitions = [] - for definition in definitions: - query_argument_groups = definition.query_argument_groups - if not query_argument_groups: - query_argument_groups = [[]] - not_yet_run = [] - for query_arguments in query_argument_groups: - if type(query_arguments) != list: - query_arguments = [query_arguments] - fn = get_result_filename(args.dataset, - args.count, definition, - query_arguments) - if args.force or not os.path.exists(fn): - not_yet_run.append(query_arguments) - if not_yet_run: - if definition.query_argument_groups: - definition = definition._replace( - query_argument_groups=not_yet_run) - filtered_definitions.append(definition) - definitions = filtered_definitions - - random.shuffle(definitions) - - if args.algorithm: - logger.info(f'running only {args.algorithm}') - definitions = [d for d in definitions if d.algorithm == args.algorithm] - - if not args.nodocker: - # See which Docker images we have available - docker_client = docker.from_env() - docker_tags = set() - for image in docker_client.images.list(): - for tag in image.tags: - tag = tag.split(':')[0] - docker_tags.add(tag) - - if args.docker_tag: - logger.info(f'running only {args.docker_tag}') - definitions = [ - d for d in definitions if d.docker_tag == args.docker_tag] - - if set(d.docker_tag for d in definitions).difference(docker_tags): - logger.info(f'not all docker images available, only: {set(docker_tags)}') - logger.info(f'missing docker images: ' - f'{str(set(d.docker_tag for d in definitions).difference(docker_tags))}') - definitions = [ - d for d in definitions if d.docker_tag in docker_tags] - - if args.max_n_algorithms >= 0: - definitions = definitions[:args.max_n_algorithms] - - if len(definitions) == 0: - raise Exception('Nothing to run') - else: - logger.info(f'Order: {definitions}') - - queue = multiprocessing.Queue() - for definition in definitions: - queue.put(definition) - #run_worker(args, queue) - workers = [multiprocessing.Process(target=run_worker, args=(args, queue)) - for i in range(1)] - [worker.start() for worker in workers] - [worker.join() for worker in workers] diff --git a/benchmark/plotting/__init__.py b/benchmark/plotting/__init__.py deleted file mode 100644 index a374fb97d..000000000 --- a/benchmark/plotting/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from __future__ import absolute_import -from benchmark.plotting import * diff --git 
a/benchmark/plotting/eval_range_search.py b/benchmark/plotting/eval_range_search.py deleted file mode 100644 index 06b68411d..000000000 --- a/benchmark/plotting/eval_range_search.py +++ /dev/null @@ -1,220 +0,0 @@ -""" -This script contains functions to evaluate the range search results. - -Range search results are represented like a sparse CSR matrix with 3 components: - - lims, I, D - -The results for query #q are: - - I[lims[q]:lims[q + 1]] in int - -And the corresponding distances are: - - D[lims[q]:lims[q + 1]] in float - -Thus, len(lims) = nq + 1, lims[i + 1] >= lims[i] forall i -and len(D) = len(I) = lims[-1]. - -The function that computes the Average Precision measure for -a search result vs. the ground-truth is compute_AP. - -Note that the ground truth format in datasets.py is slightly different: -instead of lims it returns the number of results per query, nres. -The relationship between them is - - nres = lims[1:] - lims[:-1] - -and - - lims = np.zeros(len(nres) + 1, int) - lims[1:] = np.cumsum(nres) - -""" - -import numpy as np -from multiprocessing.pool import ThreadPool - -## code copied from Faiss contrib - -def counts_to_PR(ngt, nres, ninter, mode="overall"): - """ computes a precision-recall for a ser of queries. - ngt = nb of GT results per query - nres = nb of found results per query - ninter = nb of correct results per query (smaller than nres of course) - """ - - if mode == "overall": - ngt, nres, ninter = ngt.sum(), nres.sum(), ninter.sum() - - if nres > 0: - precision = ninter / nres - else: - precision = 1.0 - - if ngt > 0: - recall = ninter / ngt - elif nres == 0: - recall = 1.0 - else: - recall = 0.0 - - return precision, recall - - elif mode == "average": - # average precision and recall over queries - - mask = ngt == 0 - ngt[mask] = 1 - - recalls = ninter / ngt - recalls[mask] = (nres[mask] == 0).astype(float) - - # avoid division by 0 - mask = nres == 0 - assert np.all(ninter[mask] == 0) - ninter[mask] = 1 - nres[mask] = 1 - - precisions = ninter / nres - - return precisions.mean(), recalls.mean() - - else: - raise AssertionError() - - -def sort_range_res_2(lims, D, I): - """ sort 2 arrays using the first as key """ - I2 = np.empty_like(I) - D2 = np.empty_like(D) - nq = len(lims) - 1 - for i in range(nq): - l0, l1 = lims[i], lims[i + 1] - ii = I[l0:l1] - di = D[l0:l1] - o = di.argsort() - I2[l0:l1] = ii[o] - D2[l0:l1] = di[o] - return I2, D2 - - -def sort_range_res_1(lims, I): - I2 = np.empty_like(I) - nq = len(lims) - 1 - for i in range(nq): - l0, l1 = lims[i], lims[i + 1] - I2[l0:l1] = I[l0:l1] - I2[l0:l1].sort() - return I2 - - -def range_PR_multiple_thresholds( - lims_ref, Iref, - lims_new, Dnew, Inew, - thresholds, - mode="overall", do_sort="ref,new" - ): - """ compute precision-recall values for range search results - for several thresholds on the "new" results. 
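A tiny worked example of the `lims` / `nres` bookkeeping described in the module docstring above:

```python
import numpy as np

nres = np.array([2, 0, 3])            # results per query, the datasets.py convention
lims = np.zeros(len(nres) + 1, int)
lims[1:] = np.cumsum(nres)            # -> [0, 2, 2, 5]
I = np.array([40, 90, 11, 70, 80])    # all neighbor ids, len(I) == lims[-1]
q = 2
print(I[lims[q]:lims[q + 1]])         # ids returned for query #2 -> [11 70 80]
```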
- This is to plot PR curves - """ - # ref should be sorted by ids - if "ref" in do_sort: - Iref = sort_range_res_1(lims_ref, Iref) - - # new should be sorted by distances - if "new" in do_sort: - Inew, Dnew = sort_range_res_2(lims_new, Dnew, Inew) - - def ref_result_for(i): - return Iref[lims_ref[i]:lims_ref[i + 1]] - - def new_result_for(i): - l0, l1 = lims_new[i], lims_new[i + 1] - return Inew[l0:l1], Dnew[l0:l1] - - nq = lims_ref.size - 1 - assert lims_new.size - 1 == nq - - nt = len(thresholds) - counts = np.zeros((nq, nt, 3), dtype="int64") - - def compute_PR_for(q): - gt_ids = ref_result_for(q) - res_ids, res_dis = new_result_for(q) - - counts[q, :, 0] = len(gt_ids) - - if res_dis.size == 0: - # the rest remains at 0 - return - - # which offsets we are interested in - nres = np.searchsorted(res_dis, thresholds) - counts[q, :, 1] = nres - - if gt_ids.size == 0: - return - - # find number of TPs at each stage in the result list - ii = np.searchsorted(gt_ids, res_ids) - ii[ii == len(gt_ids)] = -1 - n_ok = np.cumsum(gt_ids[ii] == res_ids) - - # focus on threshold points - n_ok = np.hstack(([0], n_ok)) - counts[q, :, 2] = n_ok[nres] - - pool = ThreadPool(20) - pool.map(compute_PR_for, range(nq)) - # print(counts.transpose(2, 1, 0)) - - precisions = np.zeros(nt) - recalls = np.zeros(nt) - for t in range(nt): - p, r = counts_to_PR( - counts[:, t, 0], counts[:, t, 1], counts[:, t, 2], - mode=mode - ) - precisions[t] = p - recalls[t] = r - - return precisions, recalls - - - -def compute_AP(gt, res): - """ - compute range search average precision. - It works by: - 1. defining a set of thresholds - 2. compute precision, recall for the thresholds - 3. compute AUC of rhe precision-recall curve. - """ - gt_lims, gt_I, gt_D = gt - res_lims, res_I, res_D = res - - if len(res_D) == 0: - return 0.0 - - # start with negative distance to be sure to have the - # (p, r) = (1, 0) point - dmax = res_D.max() - thresholds = np.linspace(-0.001, res_D.max(), 100) - - precisions, recalls = range_PR_multiple_thresholds( - gt_lims, gt_I, - res_lims, res_D, res_I, thresholds) - - # compute average precision using trapezoids - accu = 0 - n = len(precisions) - for i in range(n - 1): - x0, x1 = recalls[i : i + 2] - y0, y1 = precisions[i : i + 2] - accu += (x1 - x0) * (y1 + y0) / 2 - - return accu - - diff --git a/benchmark/plotting/metrics.py b/benchmark/plotting/metrics.py deleted file mode 100644 index 043cb6381..000000000 --- a/benchmark/plotting/metrics.py +++ /dev/null @@ -1,173 +0,0 @@ -from __future__ import absolute_import -import numpy as np -import itertools -import operator -import random -import sys -import copy - -from benchmark.plotting.eval_range_search import compute_AP -from benchmark.sensors.power_capture import power_capture - -def compute_recall_without_distance_ties(true_ids, run_ids, count): - return len(set(true_ids) & set(run_ids)) - -def compute_recall_with_distance_ties(true_ids, true_dists, run_ids, count): - # This function assumes "true_dists" is monotonic either increasing or decreasing - - found_tie = False - gt_size = np.shape(true_dists)[0] - - if gt_size==count: - # nothing fancy to do in this case - recall = len(set(true_ids[:count]) & set(run_ids)) - - else: - dist_tie_check = true_dists[count-1] # tie check anchored at count-1 in GT dists - - set_end = gt_size - - for i in range(count, gt_size): - is_close = abs(dist_tie_check - true_dists[i] ) < 1e-6 - if not is_close: - set_end = i - break - - found_tie = set_end > count - - recall = len(set(true_ids[:set_end]) & set(run_ids)) - 
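The trapezoid loop in `compute_AP` above is the standard trapezoidal rule over the precision-recall curve; numerically it matches numpy's built-in integration, e.g.:

```python
import numpy as np

recalls = np.array([0.0, 0.5, 1.0])      # x-axis values from the threshold sweep
precisions = np.array([1.0, 0.8, 0.6])   # corresponding precisions
ap = np.trapz(precisions, recalls)       # sum of (x1 - x0) * (y1 + y0) / 2 segments
print(ap)                                # 0.8
```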
- return recall, found_tie - -def get_recall_values(true_nn, run_nn, count, count_ties=True): - true_ids, true_dists = true_nn - if not count_ties: - true_ids = true_ids[:, :count] - assert true_ids.shape == run_nn.shape - recalls = np.zeros(len(run_nn)) - queries_with_ties = 0 - # TODO probably not very efficient - for i in range(len(run_nn)): - if count_ties: - recalls[i], found_tie = compute_recall_with_distance_ties(true_ids[i], true_dists[i], run_nn[i], count) - if found_tie: queries_with_ties += 1 - else: - recalls[i] = compute_recall_without_distance_ties(true_ids[i], run_nn[i], count) - return (np.mean(recalls) / float(count), - np.std(recalls) / float(count), - recalls, - queries_with_ties) - -def knn(true_nn, run_nn, count, metrics): - if 'knn' not in metrics: - print('Computing knn metrics') - knn_metrics = metrics.create_group('knn') - mean, std, recalls, queries_with_ties = get_recall_values(true_nn, run_nn, count) - if queries_with_ties>0: - print("Warning: %d/%d queries contained ties accounted for in recall" % (queries_with_ties, len(run_nn))) - knn_metrics.attrs['mean'] = mean - knn_metrics.attrs['std'] = std - knn_metrics['recalls'] = recalls - else: - print("Found cached result") - return metrics['knn'] - -def ap(true_nn, run_nn, metrics): - if'ap' not in metrics: - print('Computing ap metrics') - gt_nres, gt_I, gt_D = true_nn - nq = gt_nres.shape[0] - gt_lims = np.zeros(nq + 1, dtype=int) - gt_lims[1:] = np.cumsum(gt_nres) - ap = compute_AP((gt_lims, gt_I, gt_D), run_nn) - ap_metric = metrics.create_group('ap') - ap_metric.attrs['mean'] = ap - else: - print("Found cached result") - return metrics['ap'].attrs['mean'] - -def queries_per_second(nq, attrs): - return nq / attrs["best_search_time"] - - -def index_size(attrs): - return attrs.get("index_size", 0) - - -def build_time(attrs): - return attrs.get("build_time", 1e6) - - -def dist_computations(nq, attrs): - return attrs.get("dist_comps", 0) / (attrs['run_count'] * nq) - -def watt_seconds_per_query(queries, attrs): - return power_capture.compute_watt_seconds_per_query(queries, attrs ) - -def mean_ssd_ios(attrs): - return attrs.get("mean_ssd_ios", 0) - -def mean_latency(attrs): - return attrs.get("mean_latency", 0) - -all_metrics = { - "k-nn": { - "description": "Recall", - "function": lambda true_nn, run_nn, metrics, run_attrs: knn(true_nn, run_nn, run_attrs["count"], metrics).attrs['mean'], # noqa - "worst": float("-inf"), - "lim": [0.0, 1.03], - }, - "ap": { - "description": "Average Precision", - "function": lambda true_nn, run_nn, metrics, run_attrs: ap(true_nn, run_nn, metrics), # noqa - "worst": float("-inf"), - "lim": [0.0, 1.03], - "search_type" : "range", - }, - "qps": { - "description": "Queries per second (1/s)", - "function": lambda true_nn, run_nn, metrics, run_attrs: queries_per_second(len(true_nn[0]), run_attrs), # noqa - "worst": float("-inf") - }, - "distcomps": { - "description": "Distance computations", - "function": lambda true_nn, run_nn, metrics, run_attrs: dist_computations(len(true_nn[0]), run_attrs), # noqa - "worst": float("inf") - }, - "build": { - "description": "Build time (s)", - "function": lambda true_nn, run_nn, metrics, run_attrs: build_time(run_attrs), # noqa - "worst": float("inf") - }, - "indexsize": { - "description": "Index size (kB)", - "function": lambda true_nn, run_nn, metrics, run_attrs: index_size(run_attrs), # noqa - "worst": float("inf") - }, - "queriessize": { - "description": "Index size (kB)/Queries per second (s)", - "function": lambda true_nn, run_nn, metrics, 
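A worked example of the tie handling in `compute_recall_with_distance_ties` above: when ground-truth distances are tied at the count-th position, returning any of the tied ids counts as a hit (values below are made up):

```python
import numpy as np
from benchmark.plotting.metrics import compute_recall_with_distance_ties

true_ids = np.array([3, 8, 5, 9])
true_dists = np.array([0.1, 0.3, 0.3, 0.5])   # GT positions 1 and 2 are tied
run_ids = np.array([3, 5])                    # the run returned the "other" tied id

recall, found_tie = compute_recall_with_distance_ties(true_ids, true_dists, run_ids, count=2)
print(recall, found_tie)                      # 2 True -> counted as full recall@2
```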
run_attrs: index_size(run_attrs) / queries_per_second(len(true_nn[0]), run_attrs), # noqa - "worst": float("inf") - }, - "wspq": { - "description": "Watt seconds per query (watt*s/query)", - "function": lambda true_nn, run_nn, metrics, run_attrs: watt_seconds_per_query(true_nn, run_attrs), - "worst": float("-inf") - }, - "mean_ssd_ios": { - "description": "Average SSD I/Os per query", - "function": lambda true_nn, run_nn, metrics, run_attrs: mean_ssd_ios(run_attrs), - "worst": float("inf") - }, - "mean_latency": { - "description": "Mean latency across queries", - "function": lambda true_nn, run_nn, metrics, run_attrs: mean_latency(run_attrs), - "worst": float("inf") - }, - "search_times": { - "description": "List of consecutive search times for the same run parameter", - "function": lambda true_nn, run_nn, metrics, run_attrs: run_attrs.get("search_times",[]), - "worst": float("inf") - }, - -} diff --git a/benchmark/plotting/plot_variants.py b/benchmark/plotting/plot_variants.py deleted file mode 100644 index 2a1743e98..000000000 --- a/benchmark/plotting/plot_variants.py +++ /dev/null @@ -1,10 +0,0 @@ -from benchmark.plotting.metrics import all_metrics as metrics - -all_plot_variants = { - "recall/time": ("k-nn", "qps"), - "recall/buildtime": ("k-nn", "build"), - "recall/indexsize": ("k-nn", "indexsize"), - "recall/distcomps": ("k-nn", "distcomps"), - "recall/candidates": ("k-nn", "candidates"), - "recall/qpssize": ("k-nn", "queriessize"), -} diff --git a/benchmark/plotting/utils.py b/benchmark/plotting/utils.py deleted file mode 100644 index bcfe8ead3..000000000 --- a/benchmark/plotting/utils.py +++ /dev/null @@ -1,194 +0,0 @@ -from __future__ import absolute_import - -import itertools -import numpy -from benchmark.plotting.metrics import all_metrics as metrics -from benchmark.sensors.power_capture import power_capture -import traceback -import sys - -def get_or_create_metrics(run): - if 'metrics' not in run: - run.create_group('metrics') - return run['metrics'] - - -def create_pointset(data, xn, yn): - xm, ym = (metrics[xn], metrics[yn]) - rev_y = -1 if ym["worst"] < 0 else 1 - rev_x = -1 if xm["worst"] < 0 else 1 - data.sort(key=lambda t: (rev_y * t[-1], rev_x * t[-2])) - - axs, ays, als = [], [], [] - # Generate Pareto frontier - xs, ys, ls = [], [], [] - last_x = xm["worst"] - comparator = ((lambda xv, lx: xv > lx) - if last_x < 0 else (lambda xv, lx: xv < lx)) - for algo, algo_name, xv, yv in data: - if not xv or not yv: - continue - axs.append(xv) - ays.append(yv) - als.append(algo_name) - if comparator(xv, last_x): - last_x = xv - xs.append(xv) - ys.append(yv) - ls.append(algo_name) - return xs, ys, ls, axs, ays, als - - -def compute_metrics(true_nn, res, metric_1, metric_2, - recompute=False): - all_results = {} - for i, (properties, run) in enumerate(res): - algo = properties['algo'] - algo_name = properties['name'] - # cache indices to avoid access to hdf5 file - if metric_1 == "ap" or metric_2 == "ap": - run_nn = (numpy.array(run['lims']), - numpy.array(run['neighbors']), - numpy.array(run['distances'])) - else: - run_nn = numpy.array(run['neighbors']) - if recompute and 'metrics' in run: - del run['metrics'] - metrics_cache = get_or_create_metrics(run) - - metric_1_value = metrics[metric_1]['function']( - true_nn, run_nn, metrics_cache, properties) - metric_2_value = metrics[metric_2]['function']( - true_nn, run_nn, metrics_cache, properties) - - print('%3d: %80s %12.3f %12.3f' % - (i, algo_name, metric_1_value, metric_2_value)) - - all_results.setdefault(algo, []).append( - 
(algo, algo_name, metric_1_value, metric_2_value)) - - return all_results - -def compute_metrics_all_runs(dataset, res, recompute=False, - sensor_metrics=False, search_times=False, - private_query=False): - - try: - if private_query: - true_nn = dataset.get_private_groundtruth() - else: - true_nn = dataset.get_groundtruth() - except: - print(f"Groundtruth for {dataset} not found.") - #traceback.print_exc() - return - - search_type = dataset.search_type() - for i, (properties, run) in enumerate(res): - algo = properties['algo'] - algo_name = properties['name'] - # cache distances to avoid access to hdf5 file - if search_type == "knn": - run_nn = numpy.array(run['neighbors']) - elif search_type == "range": - run_nn = (numpy.array(run['lims']), - numpy.array(run['neighbors']), - numpy.array(run['distances'])) - if recompute and 'metrics' in run: - print('Recomputing metrics, clearing cache') - del run['metrics'] - metrics_cache = get_or_create_metrics(run) - - dataset = properties['dataset'] - try: - dataset = dataset.decode() - algo = algo.decode() - algo_name = algo_name.decode() - except: - pass - - run_result = { - 'algorithm': algo, - 'parameters': algo_name, - 'dataset': dataset, - 'count': properties['count'], - } - for name, metric in metrics.items(): - if search_type == "knn" and name == "ap" or\ - search_type == "range" and name == "k-nn": - continue - if not sensor_metrics and name=="wspq": #don't process power sensor_metrics by default - continue - if not search_times and name=="search_times": #don't process search_times by default - continue - v = metric["function"](true_nn, run_nn, metrics_cache, properties) - run_result[name] = v - yield run_result - -#def compute_all_metrics(true_nn, run, properties, recompute=False): -# algo = properties["algo"] -# algo_name = properties["name"] -# print('--') -# print(algo_name) -# results = {} -# # cache nn to avoid access to hdf5 file -# run_nn = numpy.array(run["neighbors"]) -# if recompute and 'metrics' in run: -# del run['metrics'] -# metrics_cache = get_or_create_metrics(run) -# -# for name, metric in metrics.items(): -# v = metric["function"]( -# true_nn, run_nn, metrics_cache, properties) -# results[name] = v -# if v: -# print('%s: %g' % (name, v)) -# return (algo, algo_name, results) -# - -def generate_n_colors(n): - vs = numpy.linspace(0.3, 0.9, 7) - colors = [(.9, .4, .4, 1.)] - - def euclidean(a, b): - return sum((x - y)**2 for x, y in zip(a, b)) - while len(colors) < n: - new_color = max(itertools.product(vs, vs, vs), - key=lambda a: min(euclidean(a, b) for b in colors)) - colors.append(new_color + (1.,)) - return colors - - -def create_linestyles(unique_algorithms): - colors = dict( - zip(unique_algorithms, generate_n_colors(len(unique_algorithms)))) - linestyles = dict((algo, ['--', '-.', '-', ':'][i % 4]) - for i, algo in enumerate(unique_algorithms)) - markerstyles = dict((algo, ['+', '<', 'o', '*', 'x'][i % 5]) - for i, algo in enumerate(unique_algorithms)) - faded = dict((algo, (r, g, b, 0.3)) - for algo, (r, g, b, a) in colors.items()) - return dict((algo, (colors[algo], faded[algo], - linestyles[algo], markerstyles[algo])) - for algo in unique_algorithms) - - -def get_up_down(metric): - if metric["worst"] == float("inf"): - return "down" - return "up" - - -def get_left_right(metric): - if metric["worst"] == float("inf"): - return "left" - return "right" - - -def get_plot_label(xm, ym): - template = ("%(xlabel)s-%(ylabel)s tradeoff - %(updown)s and" - " to the %(leftright)s is better") - return template % {"xlabel": 
xm["description"], - "ylabel": ym["description"], - "updown": get_up_down(ym), - "leftright": get_left_right(xm)} diff --git a/benchmark/results.py b/benchmark/results.py deleted file mode 100644 index ff2ec5ee6..000000000 --- a/benchmark/results.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import absolute_import - -import h5py -import json -import os -import re -import traceback - - -def get_result_filename(dataset=None, count=None, definition=None, - query_arguments=None): - d = ['results'] - if dataset: - d.append(dataset) - if count: - d.append(str(count)) - if definition: - d.append(definition.algorithm) - build_args = definition.arguments - try: - for args in build_args: - if type(args) == dict and 'indexkey' in args: - build_args = [args['indexkey']] - except: - pass - data = build_args + query_arguments - data = re.sub(r'\W+', '_', json.dumps(data, sort_keys=True)).strip('_') - if len(data) > 150: - data = data[-149:] - d.append(data) - - return os.path.join(*d) - - -def store_results(dataset, count, definition, query_arguments, - attrs, results, search_type): - fn = get_result_filename( - dataset, count, definition, query_arguments) + '.hdf5' - head, tail = os.path.split(fn) - if not os.path.isdir(head): - os.makedirs(head) - f = h5py.File(fn, 'w') - for k, v in attrs.items(): - f.attrs[k] = v - if search_type == "knn": - neighbors = f.create_dataset('neighbors', (len(results), count), 'i') - for i, idxs in enumerate(results): - neighbors[i] = idxs - elif search_type == "range": - lims, D, I= results - f.create_dataset('neighbors', data=I) - f.create_dataset('lims', data=lims) - f.create_dataset('distances', data=D) - else: - raise NotImplementedError() - f.close() - - -def load_all_results(dataset=None, count=None): - """ - A generator for all result files. - """ - for root, _, files in os.walk(get_result_filename(dataset, count)): - for fn in files: - if os.path.splitext(fn)[-1] != '.hdf5': - continue - try: - f = h5py.File(os.path.join(root, fn), 'r+') - properties = dict(f.attrs) - yield properties, f - f.close() - except: - print('Was unable to read', fn) - traceback.print_exc() - - -def get_unique_algorithms(): - return set(properties['algo'] for properties, _ in load_all_results()) diff --git a/benchmark/runner.py b/benchmark/runner.py deleted file mode 100644 index 1218d8e88..000000000 --- a/benchmark/runner.py +++ /dev/null @@ -1,351 +0,0 @@ -import argparse -import json -import logging -import os -import threading -import time -import traceback - -import colors -import docker -import numpy -import psutil - -from benchmark.algorithms.definitions import (Definition, - instantiate_algorithm) - -from benchmark.datasets import DATASETS, upload_accelerated, download_accelerated -from benchmark.results import store_results - -from benchmark.sensors.power_capture import power_capture -from benchmark.t3.helper import t3_create_container - -def run_individual_query(algo, X, distance, count, run_count, search_type): - best_search_time = float('inf') - search_times = [] - for i in range(run_count): - print('Run %d/%d...' 
% (i + 1, run_count)) - - start = time.time() - if search_type == "knn": - algo.query(X, count) - total = (time.time() - start) - results = algo.get_results() - assert len(results) == len(X) - else: - algo.range_query(X, count) - total = (time.time() - start) - results = algo.get_range_results() - - search_time = total - best_search_time = min(best_search_time, search_time) - search_times.append( search_time ) - - attrs = { - "best_search_time": best_search_time, - "name": str(algo), - "run_count": run_count, - "distance": distance, - "type": search_type, - "count": int(count), - "search_times": search_times - } - additional = algo.get_additional() - for k in additional: - attrs[k] = additional[k] - return (attrs, results) - -def run(definition, dataset, count, run_count, rebuild, - upload_index=False, download_index=False, - blob_prefix="", sas_string="", private_query=False): - - algo = instantiate_algorithm(definition) - assert not definition.query_argument_groups \ - or hasattr(algo, "set_query_arguments"), """\ -error: query argument groups have been specified for %s.%s(%s), but the \ -algorithm instantiated from it does not implement the set_query_arguments \ -function""" % (definition.module, definition.constructor, definition.arguments) - - assert not upload_index or not download_index - - ds = DATASETS[dataset]() - #X_train = numpy.array(D['train']) - if not private_query: - X = ds.get_queries() - else: - X = ds.get_private_queries() - distance = ds.distance() - search_type = ds.search_type() - print(f"Running {definition.algorithm} on {dataset}") - print(fr"Got {len(X)} queries") - - try: - # Try loading the index from the file - memory_usage_before = algo.get_memory_usage() - if download_index: - local_dir, index_prefix, components = algo.index_files_to_store(dataset) - remote_location = blob_prefix + '/' + algo.track() + '/' + algo.__str__() + '/' + DATASETS[dataset]().short_name() + '/' - for component in components: - download_accelerated(remote_location + index_prefix + component, - local_dir + '/' + index_prefix + component, - False, sas_string) - print("Index files downloaded.") - if algo.load_index(dataset): - print("Index loaded.") - else: - print("Index load failed.") - elif rebuild or not algo.load_index(dataset): - # Build the index if it is not available - t0 = time.time() - algo.fit(dataset) - build_time = time.time() - t0 - print('Built index in', build_time) - else: - print("Loaded existing index") - - - index_size = algo.get_memory_usage() - memory_usage_before - print('Index memory footprint: ', index_size) - - if upload_index: - print("Starting index upload...") - local_dir, index_prefix, components = algo.index_files_to_store(dataset) - remote_location = blob_prefix + '/' + algo.track() + '/' + algo.__str__() + '/' + DATASETS[dataset]().short_name() - for component in components: - upload_accelerated(local_dir, remote_location, - index_prefix + component, sas_string) - else: - print("Starting query") - query_argument_groups = definition.query_argument_groups - # Make sure that algorithms with no query argument groups still get run - # once by providing them with a single, empty, harmless group - if not query_argument_groups: - query_argument_groups = [[]] - - for pos, query_arguments in enumerate(query_argument_groups, 1): - print("Running query argument group %d of %d..." 
% - (pos, len(query_argument_groups))) - if query_arguments: - algo.set_query_arguments(*query_arguments) - descriptor, results = run_individual_query( - algo, X, distance, count, run_count, search_type) - # A bit unclear how to set this correctly if we usually load from file - #descriptor["build_time"] = build_time - descriptor["index_size"] = index_size - descriptor["algo"] = definition.algorithm - descriptor["dataset"] = dataset - if power_capture.enabled(): - power_stats = power_capture.run(algo, X, distance, count, - run_count, search_type, descriptor) - - store_results(dataset, count, definition, - query_arguments, descriptor, results, search_type) - finally: - algo.done() - - -def run_from_cmdline(args=None): - parser = argparse.ArgumentParser(''' - - NOTICE: You probably want to run.py rather than this script. - -''') - parser.add_argument( - '--dataset', - choices=DATASETS.keys(), - help=f'Dataset to benchmark on.', - required=True) - parser.add_argument( - '--algorithm', - help='Name of algorithm for saving the results.', - required=True) - parser.add_argument( - '--module', - help='Python module containing algorithm. E.g. "ann_benchmarks.algorithms.annoy"', - required=True) - parser.add_argument( - '--constructor', - help='Constructer to load from module. E.g. "Annoy"', - required=True) - parser.add_argument( - '--count', - help='k: Number of nearest neighbours for the algorithm to return.', - required=True, - type=int) - parser.add_argument( - '--rebuild', - help='re-build index even if it exists', - action='store_true') - parser.add_argument( - '--runs', - help='Number of times to run the algorihm. Will use the fastest run-time over the bunch.', - required=True, - type=int) - parser.add_argument( - 'build', - help='JSON of arguments to pass to the constructor. E.g. ["angular", 100]' - ) - parser.add_argument( - 'queries', - help='JSON of arguments to pass to the queries. E.g. [100]', - nargs='*', - default=[]) - parser.add_argument( - '--power-capture', - help='Power capture parameters for the T3 competition. 
' - 'Format is "ip:port:capture_time_in_seconds (ie, 127.0.0.1:3000:10).', - default="") - parser.add_argument( - '--upload-index', - help='Upload index to cloud storage.', - action='store_true') - parser.add_argument( - '--download-index', - help='Download index from cloud storage.', - action='store_true') - parser.add_argument( - '--blob-prefix', - help='Azure blob prefix to upload index to or download index from.') - parser.add_argument( - '--sas-string', - help='SAS string to authenticate to Azure blob storage.') - parser.add_argument( - '--private-query', - help='Use the new set of private queries that were not released during the competition period.', - action="store_true") - - args = parser.parse_args(args) - algo_args = json.loads(args.build) - print(algo_args) - query_args = [json.loads(q) for q in args.queries] - - if args.power_capture: - power_capture( args.power_capture ) - power_capture.ping() - - definition = Definition( - algorithm=args.algorithm, - docker_tag=None, # not needed - module=args.module, - constructor=args.constructor, - arguments=algo_args, - query_argument_groups=query_args, - disabled=False - ) - run(definition, args.dataset, args.count, args.runs, args.rebuild, - args.upload_index, args.download_index, args.blob_prefix, args.sas_string, - args.private_query) - - -def run_docker(definition, dataset, count, runs, timeout, rebuild, - cpu_limit, mem_limit=None, t3=None, power_capture=None, - upload_index=False, download_index=False, - blob_prefix="", sas_string="", private_query=False): - cmd = ['--dataset', dataset, - '--algorithm', definition.algorithm, - '--module', definition.module, - '--constructor', definition.constructor, - '--runs', str(runs), - '--count', str(count)] - if power_capture: - cmd += ["--power-capture", power_capture ] - if rebuild: - cmd.append("--rebuild") - if upload_index: - cmd.append("--upload-index") - cmd += ["--blob-prefix", blob_prefix] - cmd += ["--sas-string", sas_string] - if download_index: - cmd.append("--download-index") - cmd += ["--blob-prefix", blob_prefix] - cmd += ["--sas-string", sas_string] - if private_query==True: - cmd.append("--private-query") - - cmd.append(json.dumps(definition.arguments)) - cmd += [json.dumps(qag) for qag in definition.query_argument_groups] - - client = docker.from_env() - if mem_limit is None: - mem_limit = psutil.virtual_memory().available - - - container = None - if t3: - container = t3_create_container(definition, cmd, cpu_limit, mem_limit ) - timeout = 3600*24*3 # 3 days - print("Setting container wait timeout to 3 days") - - else: - container = client.containers.run( - definition.docker_tag, - cmd, - volumes={ - os.path.abspath('benchmark'): - {'bind': '/home/app/benchmark', 'mode': 'ro'}, - os.path.abspath('data'): - {'bind': '/home/app/data', 'mode': 'rw'}, - os.path.abspath('results'): - {'bind': '/home/app/results', 'mode': 'rw'}, - }, - cpuset_cpus=cpu_limit, - mem_limit=mem_limit, - detach=True) - - logger = logging.getLogger(f"annb.{container.short_id}") - - logger.info('Created container %s: CPU limit %s, mem limit %s, timeout %d, command %s' % \ - (container.short_id, cpu_limit, mem_limit, timeout, cmd)) - - def stream_logs(): - for line in container.logs(stream=True): - logger.info(colors.color(line.decode().rstrip(), fg='blue')) - - t = threading.Thread(target=stream_logs, daemon=True) - t.start() - - try: - exit_code = container.wait(timeout=timeout) - - # Exit if exit code - if exit_code not in [0, None]: - logger.error(colors.color(container.logs().decode(), fg='red')) - 
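`run_no_docker` (below) assembles an argv list like the one built in `run_docker` above and hands it straight to `run_from_cmdline`; a hedged sketch of such a direct invocation, where the algorithm, module and constructor names are placeholders rather than real entries from `algos.yaml`:

```python
import json
from benchmark.runner import run_from_cmdline

cmd = ['--dataset', 'random-xs',
       '--algorithm', 'my-algo',                    # placeholder label used in result paths
       '--module', 'benchmark.algorithms.my_algo',  # hypothetical module
       '--constructor', 'MyAlgo',                   # hypothetical class in that module
       '--runs', '3',
       '--count', '10',
       json.dumps(["euclidean", 100]),              # positional build arguments
       json.dumps([50])]                            # one query-argument group
run_from_cmdline(cmd)
```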
logger.error('Child process for container %s raised exception %d' % (container.short_id, exit_code)) - except: - logger.error('Container.wait for container %s failed with exception' % container.short_id) - logger.error('Invoked with %s' % cmd) - traceback.print_exc() - finally: - container.remove(force=True) - - -def run_no_docker(definition, dataset, count, runs, timeout, rebuild, - cpu_limit, mem_limit=None, t3=False, power_capture=None, - upload_index=False, download_index=False, - blob_prefix="", sas_string="", private_query=False): - cmd = ['--dataset', dataset, - '--algorithm', definition.algorithm, - '--module', definition.module, - '--constructor', definition.constructor, - '--runs', str(runs), - '--count', str(count)] - if power_capture: - cmd += ["--power-capture", power_capture ] - if rebuild: - cmd.append("--rebuild") - if upload_index: - cmd.append("--upload-index") - cmd += ["--blob-prefix", blob_prefix] - cmd += ["--sas-string", sas_string] - if download_index: - cmd.append("--download-index") - cmd += ["--blob-prefix", blob_prefix] - cmd += ["--sas-string", sas_string] - if private_query==True: - cmd.append("--private-query") - - cmd.append(json.dumps(definition.arguments)) - cmd += [json.dumps(qag) for qag in definition.query_argument_groups] - run_from_cmdline(cmd) - - diff --git a/benchmark/sensors/power_capture.py b/benchmark/sensors/power_capture.py deleted file mode 100644 index 664c39746..000000000 --- a/benchmark/sensors/power_capture.py +++ /dev/null @@ -1,248 +0,0 @@ -import requests -import uuid -import json -import time -import statistics -import math - -class power_capture: - - """ - This singleton class provides various capabilites related to the T3 track - of the Big ANN Competition for NeurIPS 2021: - * communicates with an ipmicap server ( see http://github.com/fractalsproject/ipmicap ) - * leverage's servers power sensor capture abilities - * retrieves power statistics computed by the server - """ - - ipmicap_ip = None - ipmicap_port = None - min_capture_time = None - raise_exc_on_fail = None - - @classmethod - def __init__(cls, packed_parm, raise_exc_on_fail=True): - - parms = packed_parm.split(":") - ipmicap_ip = parms[0] - ipmicap_port = int(parms[1]) - min_capture_time = float(parms[2]) - cls.ipmicap_ip = ipmicap_ip - cls.ipmicap_port = ipmicap_port - cls.min_capture_time = min_capture_time - cls.raise_exc_on_fail = raise_exc_on_fail - - @classmethod - def _send_msg_to_ipmicap_server(cls, uri, parms): - url = "http://%s:%d/%s" % (cls.ipmicap_ip,cls.ipmicap_port,uri) - resp = requests.get(url,parms) - if resp.status_code!=200: - msg = "T3: Failed to ping ipmicapserver." - if cls.raise_exc_on_fail: - raise Exception(msg) - else: - print("Power: Failed to ping ipmicap server.") - return False - else: - ret_json = resp.json() - return resp.json() - - @classmethod - def enabled(cls): - """ - Returns True if this singleton class has been initialized. - """ - if cls.ipmicap_ip != None: - return True - else: - return False - - @classmethod - def ping(cls): - """ - Ping the IPMICAP server and make sure it's running. - """ - return cls._send_msg_to_ipmicap_server("log",{"ping":1}) - - @classmethod - def start(cls): - """ - Start power capture at the IPMI server. 
- """ - session_id = str(uuid.uuid4()) - status = cls._send_msg_to_ipmicap_server("session", - {"start":1,"id":session_id}) - if status: - return session_id - else: - return False - - @classmethod - def stop(cls, session_id, all_stats=False): - """ - End power capture at the IPMI server for the session - and returm the computed power consumption. - """ - stop_parm = "all_stats" if all_stats else 1 - power_stats = cls._send_msg_to_ipmicap_server("session", - {"stop":stop_parm, "id": session_id}) - if power_stats: - return power_stats - else: - return False - - @classmethod - def get_stats(cls, session_ids): - """ - Retrieve power capture statistics for capture ids supplied. - """ - raise Exception("Not implemented.") - - @classmethod - def run_has_power_stats(cls, properties): - """ - Determines if the benchmark run has power related metrics. - """ - if "power_consumption" in properties: return True - else: return False - - @classmethod - def detect_power_benchmarks(cls, metrics, res): - """ - Adjust the global metrics based on the availability of - power related benchmarks in the loaded results. - """ - has_power_benchmarks = False - for i, (properties, run) in enumerate(res): - if cls.run_has_power_stats(properties): - has_power_benchmarks = True - break - if has_power_benchmarks: - return True - else: # no power benchmarks and not required, just remove from global benchmarks - #print("Ignoring the global 'wspq' metric because no power benchmarks are present.") - metrics.pop("wspq", None) - return True - - @classmethod - def detect_power_benchmarks_for_plot(cls, args, res ): - """ - If power benchmarks are requested for plot but now power benchmarks are - not present then return False. - """ - required = args.x_axis=='wspq' or args.y_axis=='wspq' - if not required: - return True - - has_power_benchmarks = False - for i, (properties, run) in enumerate(res): - if cls.run_has_power_stats(properties): - has_power_benchmarks = True - break - if has_power_benchmarks and required: return True - else: - print("No power benchmarks found in loaded results.") - return False - - - @classmethod - def compute_watt_seconds_per_query(cls, queries, attrs ): - """ - Retreive the benchmark metric wspq. 
- """ - return attrs["best_wspq"] - - @classmethod - def run(cls, algo, X, distance, count, run_count, search_type, descriptor ): - """The runner for power consumption is slightly different than the default runner.""" - - capture_time = power_capture.min_capture_time - best_search_time = descriptor["best_search_time"] - - inner_run_count = math.ceil(capture_time/best_search_time) if capture_time > best_search_time else 1 - - print('Run for power capture with %d iterations (via %d/%f) for %d iterations' - % (inner_run_count, capture_time, best_search_time, run_count ) ) - - cap_ids = [] - power_run_counts = [] - power_run_times = [] - power_consumptions = [] - power_tot_queries = [] - - best_power_cons = float('inf') - for i in range(run_count): - cap_id = cls.start() - start = time.time() - for i in range(inner_run_count): - if search_type == "knn": - algo.query(X, count) - else: - algo.range_query(X, count) - total = (time.time() - start) - power_stats = cls.stop(cap_id, all_stats=True) - power_cons = power_stats['tot_power'] - tot_queries = inner_run_count * X.shape[0] - - # Track the best one thus far - best_power_cons = min(best_power_cons, power_cons) - best_tot_queries = tot_queries # Although its always the same now, we may change that - - cap_ids.append(cap_id) - power_run_counts.append( inner_run_count ) - power_run_times.append( total ) - power_consumptions.append( power_cons ) - power_tot_queries.append( tot_queries ) - - power_cons_mean = statistics.mean( power_consumptions ) - power_cons_stdev = statistics.stdev( power_consumptions ) - best_wspq = best_power_cons/best_tot_queries - mean_wspq = power_cons_mean/best_tot_queries - print("wspq: best=%f mean=%f best_tot_queries=%d" % (best_wspq, mean_wspq, best_tot_queries)) - power_stats = {"power_cap_id": cap_ids, - "power_run_count": power_run_counts, - "power_run_time": power_run_times, - "power_consumption":power_consumptions, - "best_power_consumption": best_power_cons, - "inner_run_count": inner_run_count, - "power_consumption_mean": power_cons_mean, - "power_consumption_stdev": power_cons_stdev, - "best_wspq": best_wspq, - "mean_wspq": mean_wspq } - - for k in power_stats.keys(): - descriptor[k] = power_stats[k] - - -# -# To run these unit tests for the power_capture class, type 'python power_capture.py' -# -if __name__ == "__main__": - - print("power capture unit tests") - - ipmicap_ip = "192.168.99.112" # Set to your ipmicap's server ip - ipmicap_port = 3000 # Set to your ipmicap's server port - min_capture_time = -1 - - #power_capture( ipmicap_ip, ipmicap_port, min_capture_time ) - power_capture( "%s:%d:%f" % (ipmicap_ip, ipmicap_port, min_capture_time )) - - print("pinging ipmicap server at %s:%d" % (power_capture.ipmicap_ip, - power_capture.ipmicap_port)) - power_capture.ping() - - print("enabled=", power_capture.enabled()) - - print("start") - cid=power_capture.start() - print("cid=",cid) - - print("stop") - power_capture.stop() - - stats = power_capture.get_stats([cid]) - print("stats=",stats) - - print("all tests passed.") - diff --git a/benchmark/t3/__init__.py b/benchmark/t3/__init__.py deleted file mode 100644 index c3961685a..000000000 --- a/benchmark/t3/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import absolute_import diff --git a/benchmark/t3/helper.py b/benchmark/t3/helper.py deleted file mode 100644 index 2a83550fd..000000000 --- a/benchmark/t3/helper.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -import docker - -from benchmark.datasets import DATASETS, BigANNDataset - -def 
print_cuda_versions(): - info = subprocess.check_output(['nvidia-smi', '--query-gpu=driver_version','--format=csv']) - print(info) - info = subprocess.check_output(['nvcc', '--version']) - print(info) - - -def t3_create_container( definition, cmd, cpu_limit, mem_limit): - - if definition.algorithm in [ 'faiss-t3' ]: - - print("Launching GPU container") - container = create_container_with_gpu_support( - docker.from_env(), - definition.docker_tag, - cmd, - volumes={ - os.path.abspath('benchmark'): - {'bind': '/home/app/benchmark', 'mode': 'ro'}, - os.path.abspath('data'): - {'bind': '/home/app/data', 'mode': 'rw'}, - os.path.abspath('results'): - {'bind': '/home/app/results', 'mode': 'rw'}, - }, - cpuset_cpus=cpu_limit, - mem_limit=mem_limit, - detach=True) - container.start() - return container - - else: - raise Exception("Docker invoke not supported for this algorithm.") - -def create_container_with_gpu_support(client, image, command, volumes, **kwargs): - - - from docker.models.images import Image - from docker.models.containers import _create_container_args - - if isinstance(image, Image): - image = image.id - - os.environ['NVIDIA_VISIBLE_DEVICES']='all' - - kwargs['image'] = image - kwargs['command'] = command - kwargs['version'] = client.containers.client.api._version - kwargs['volumes'] = volumes - create_kwargs = _create_container_args(kwargs) - - device_request = { - 'Driver': 'nvidia', - 'Capabilities': [['gpu'], ['nvidia']], - 'Count': -1, # enable all gpus - } - - if device_request is not None: - create_kwargs['host_config']['DeviceRequests'] = [device_request] - - resp = client.api.create_container(**create_kwargs) - return client.containers.get(resp['Id']) - -def create_container_with_network_host_support(client, image, command, volumes, **kwargs): - - kwargs['image'] = image - kwargs['command'] = command - kwargs['version'] = client.containers.client.api._version - kwargs['volumes'] = volumes - kwargs['network'] = "host" - create_kwargs = _create_container_args(kwargs) - - resp = client.api.create_container(**create_kwargs) - return client.containers.get(resp['Id']) - - -class BigANNDatasetAngular(BigANNDataset): - - def __init__(self, *args, **kwargs): - ret = super().__init__(*args, **kwargs) - if self.gt_fn: - print("You must compute and replace the ground truth file here:", self.gt_fn ) - else: - gt_fn = self._form_gt_fn() - if os.path.exists( os.path.join( self.basedir, gt_fn)): - #print("file %s already exists" % gt_fn ) - self.gt_fn = gt_fn - else: - print("You must compute the ground and create the file in here:", - os.path.join( self.basedir, gt_fn ) ) - return ret - - def _form_gt_fn(self): - gt_fn = "gt_angular.ibin" - print("ds", self.ds_fn, self.nb, 10**9) - if self.nb < 10**9: - gt_fn += ".crop_nb_%d" % ( self.nb ) - return gt_fn - - def get_groundtruth(self, *args, **kwargs): - self.gt_fn = self.gt_fn - return super().get_groundtruth(*args, **kwargs) - - def distance(self): - return "angular" - - diff --git a/cmt_logo.png b/cmt_logo.png new file mode 100644 index 000000000..4ad6a8404 Binary files /dev/null and b/cmt_logo.png differ diff --git a/create_dataset.py b/create_dataset.py deleted file mode 100644 index bc8062943..000000000 --- a/create_dataset.py +++ /dev/null @@ -1,16 +0,0 @@ -import argparse -from benchmark.datasets import DATASETS - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--dataset', - choices=DATASETS.keys(), - required=True) - parser.add_argument( - '--skip-data', - action='store_true', - 
help='skip downloading base vectors') - args = parser.parse_args() - ds = DATASETS[args.dataset]() - ds.prepare(True if args.skip_data else False) diff --git a/create_website.py b/create_website.py deleted file mode 100644 index 5c90304de..000000000 --- a/create_website.py +++ /dev/null @@ -1,278 +0,0 @@ -import matplotlib as mpl -mpl.use('Agg') # noqa -import argparse -import os -import json -import pickle -import yaml -import numpy -import hashlib -from jinja2 import Environment, FileSystemLoader - -from benchmark import results -#from benchmark.datasets import get_dataset -from benchmark.plotting.plot_variants import (all_plot_variants - as plot_variants) -from benchmark.plotting.metrics import all_metrics as metrics -from benchmark.plotting.utils import (get_plot_label, compute_metrics, - compute_metrics_all_runs, - create_pointset, - create_linestyles) -import plot - -colors = [ - "rgba(166,206,227,1)", - "rgba(31,120,180,1)", - "rgba(178,223,138,1)", - "rgba(51,160,44,1)", - "rgba(251,154,153,1)", - "rgba(227,26,28,1)", - "rgba(253,191,111,1)", - "rgba(255,127,0,1)", - "rgba(202,178,214,1)" -] - -point_styles = { - "o": "circle", - "<": "triangle", - "*": "star", - "x": "cross", - "+": "rect", -} - - -def convert_color(color): - r, g, b, a = color - return "rgba(%(r)d, %(g)d, %(b)d, %(a)d)" % { - "r": r * 255, "g": g * 255, "b": b * 255, "a": a} - - -def convert_linestyle(ls): - new_ls = {} - for algo in ls.keys(): - algostyle = ls[algo] - new_ls[algo] = (convert_color(algostyle[0]), - convert_color(algostyle[1]), - algostyle[2], point_styles[algostyle[3]]) - return new_ls - - -def get_run_desc(properties): - return "%(dataset)s_%(count)d_%(distance)s" % properties - - -def get_dataset_from_desc(desc): - return desc.split("_")[0] - - -def get_count_from_desc(desc): - return desc.split("_")[1] - - -def get_distance_from_desc(desc): - return desc.split("_")[2] - - -def get_dataset_label(desc): - return "{} (k = {})".format(get_dataset_from_desc(desc), - get_count_from_desc(desc)) - - -def directory_path(s): - if not os.path.isdir(s): - raise argparse.ArgumentTypeError("'%s' is not a directory" % s) - return s + "/" - - -def prepare_data(data, xn, yn): - """Change format from (algo, instance, dict) to (algo, instance, x, y).""" - res = [] - for algo, algo_name, result in data: - res.append((algo, algo_name, result[xn], result[yn])) - return res - - -parser = argparse.ArgumentParser() -parser.add_argument( - '--plottype', - help='Generate only the plots specified', - nargs='*', - choices=plot_variants.keys(), - default=plot_variants.keys()) -parser.add_argument( - '--outputdir', - help='Select output directory', - default='.', - type=directory_path, - action='store') -parser.add_argument( - '--latex', - help='generates latex code for each plot', - action='store_true') -parser.add_argument( - '--scatter', - help='create scatterplot for data', - action='store_true') -parser.add_argument( - '--recompute', - help='Clears the cache and recomputes the metrics', - action='store_true') -args = parser.parse_args() - - -def get_lines(all_data, xn, yn, render_all_points): - """ For each algorithm run on a dataset, obtain its performance - curve coords.""" - plot_data = [] - for algo in sorted(all_data.keys(), key=lambda x: x.lower()): - xs, ys, ls, axs, ays, als = \ - create_pointset(prepare_data(all_data[algo], xn, yn), xn, yn) - if render_all_points: - xs, ys, ls = axs, ays, als - plot_data.append({"name": algo, "coords": zip(xs, ys), "labels": ls, - "scatter": render_all_points}) - return 
plot_data - - -def create_plot(all_data, xn, yn, linestyle, j2_env, additional_label="", - plottype="line"): - xm, ym = (metrics[xn], metrics[yn]) - render_all_points = plottype == "bubble" - plot_data = get_lines(all_data, xn, yn, render_all_points) - latex_code = j2_env.get_template("latex.template").\ - render(plot_data=plot_data, caption=get_plot_label(xm, ym), - xlabel=xm["description"], ylabel=ym["description"]) - plot_data = get_lines(all_data, xn, yn, render_all_points) - button_label = hashlib.sha224((get_plot_label(xm, ym) + additional_label) - .encode("utf-8")).hexdigest() - return j2_env.get_template("chartjs.template").\ - render(args=args, latex_code=latex_code, button_label=button_label, - data_points=plot_data, - xlabel=xm["description"], ylabel=ym["description"], - plottype=plottype, plot_label=get_plot_label(xm, ym), - label=additional_label, linestyle=linestyle, - render_all_points=render_all_points) - - -def build_detail_site(data, label_func, j2_env, linestyles, batch=False): - for (name, runs) in data.items(): - print("Building '%s'" % name) - all_runs = runs.keys() - label = label_func(name) - data = {"normal": [], "scatter": []} - - for plottype in args.plottype: - xn, yn = plot_variants[plottype] - data["normal"].append(create_plot( - runs, xn, yn, convert_linestyle(linestyles), j2_env)) - if args.scatter: - data["scatter"].append( - create_plot(runs, xn, yn, convert_linestyle(linestyles), - j2_env, "Scatterplot ", "bubble")) - - # create png plot for summary page - data_for_plot = {} - for k in runs.keys(): - data_for_plot[k] = prepare_data(runs[k], 'k-nn', 'qps') - plot.create_plot( - data_for_plot, False, - 'linear', 'log', 'k-nn', 'qps', - args.outputdir + name + '.png', - linestyles, batch) - output_path = \ - args.outputdir + name + '.html' - with open(output_path, "w") as text_file: - text_file.write(j2_env.get_template("detail_page.html"). - render(title=label, plot_data=data, - args=args, batch=batch)) - - -def build_index_site(datasets, algorithms, j2_env, file_name): - dataset_data = {'batch': [], 'non-batch': []} - for mode in ['batch', 'non-batch']: - distance_measures = sorted( - set([get_distance_from_desc(e) for e in datasets[mode].keys()])) - sorted_datasets = sorted( - set([get_dataset_from_desc(e) for e in datasets[mode].keys()])) - - for dm in distance_measures: - d = {"name": dm.capitalize(), "entries": []} - for ds in sorted_datasets: - matching_datasets = [e for e in datasets[mode].keys() - if get_dataset_from_desc(e) == ds and # noqa - get_distance_from_desc(e) == dm] - sorted_matches = sorted( - matching_datasets, - key=lambda e: int(get_count_from_desc(e))) - for idd in sorted_matches: - d["entries"].append( - {"name": idd, "desc": get_dataset_label(idd)}) - dataset_data[mode].append(d) - - with open(args.outputdir + "index.html", "w") as text_file: - text_file.write(j2_env.get_template("summary.html"). - render(title="Big ANN Benchmarks", - dataset_with_distances=dataset_data, - algorithms=algorithms)) - - -def build_index_site_without_data(j2_env, file_name): - with open(args.outputdir + "index.html", "w") as text_file: - text_file.write(j2_env.get_template("summary.html"). 
- render(title="Big ANN Benchmarks")) - -# def load_all_results(): -# """Read all result files and compute all metrics""" -# all_runs_by_dataset = {'batch': {}, 'non-batch': {}} -# all_runs_by_algorithm = {'batch': {}, 'non-batch': {}} -# cached_true_dist = [] -# old_sdn = None -# for mode in ["non-batch", "batch"]: -# for properties, f in results.load_all_results(batch_mode=(mode == "batch")): -# sdn = get_run_desc(properties) -# if sdn != old_sdn: -# dataset = get_dataset(properties["dataset"]) -# cached_true_dist = list(dataset["distances"]) -# old_sdn = sdn -# algo_ds = get_dataset_label(sdn) -# desc_suffix = ("-batch" if mode == "batch" else "") -# algo = properties["algo"] + desc_suffix -# sdn += desc_suffix -# ms = compute_all_metrics( -# cached_true_dist, f, properties, args.recompute) -# all_runs_by_algorithm[mode].setdefault( -# algo, {}).setdefault(algo_ds, []).append(ms) -# all_runs_by_dataset[mode].setdefault( -# sdn, {}).setdefault(algo, []).append(ms) - -# return (all_runs_by_dataset, all_runs_by_algorithm) - - -j2_env = Environment(loader=FileSystemLoader("./templates/"), trim_blocks=True) -j2_env.globals.update(zip=zip, len=len) -#runs_by_ds, runs_by_algo = load_all_results() -# dataset_names = [get_dataset_label(x) for x in list( -# runs_by_ds['batch'].keys()) + list(runs_by_ds['non-batch'].keys())] -# algorithm_names = list(runs_by_algo['batch'].keys( -# )) + list(runs_by_algo['non-batch'].keys()) - -# linestyles = {**create_linestyles(dataset_names), -# **create_linestyles(algorithm_names)} - -# build_detail_site( -# runs_by_ds['non-batch'], -# lambda label: get_dataset_label(label), j2_env, linestyles, False) - -# build_detail_site( -# runs_by_ds['batch'], -# lambda label: get_dataset_label(label), j2_env, linestyles, True) - -# build_detail_site( -# runs_by_algo['non-batch'], -# lambda x: x, j2_env, linestyles, False) - -# build_detail_site( -# runs_by_algo['batch'], lambda x: x, j2_env, linestyles, True) - -#build_index_site(runs_by_ds, runs_by_algo, j2_env, "index.html") -build_index_site_without_data(j2_env, "index.html") diff --git a/data_export.py b/data_export.py deleted file mode 100644 index 83489c319..000000000 --- a/data_export.py +++ /dev/null @@ -1,95 +0,0 @@ -import pandas as pd -import sys -import os -import matplotlib as mpl -mpl.use('Agg') -import matplotlib.pyplot as plt -import argparse -import bz2 -import sys - -from benchmark.datasets import DATASETS -from benchmark.plotting.utils import compute_metrics_all_runs -from benchmark.results import load_all_results, get_unique_algorithms - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--output', - help='Path to the output csv file', - required=True) - parser.add_argument( - '--recompute', - action='store_true', - help='Path to the output csv file') - parser.add_argument( - - '--private-query', - help='Use the private queries and ground truth', - action='store_true') - parser.add_argument( - '--sensors', - action='store_true', - help='Export sensors data if available') - parser.add_argument( - '--search-times', - action='store_true', - help='Export search times data if available') - parser.add_argument( - '--detect-caching', - type=float, - default=None, - metavar="THRESHOLD", - help='Try to detect query response caching by analyzing search times. 
Supply a threshold betwee 0 and 1, such as 0.3.') - args = parser.parse_args() - - if args.detect_caching!=None and not args.search_times: - print("Error: --detect_caching requires the --search_times flag") - sys.exit(1) - - datasets = DATASETS.keys() - dfs = [] - - is_first = True - for dataset_name in datasets: - print("Looking at dataset", dataset_name) - dataset = DATASETS[dataset_name]() - results = load_all_results(dataset_name) - results = compute_metrics_all_runs(dataset, results, args.recompute, \ - args.sensors, args.search_times, args.private_query) - cleaned = [] - for result in results: - if 'k-nn' in result: - result['recall/ap'] = result['k-nn'] - del result['k-nn'] - if 'ap' in result: - result['recall/ap'] = result['ap'] - del result['ap'] - if args.sensors: - if 'wspq' not in result: - print('Warning: wspq sensor data not available.') - if args.search_times: - search_times = result['search_times'] - if 'search_times' in result: - # create a space separated list suitable as column for a csv - result['search_times'] = \ - " ".join( [str(el) for el in search_times ] ) - - if args.detect_caching != None: - print("%s: Checking for response caching for these search times->" % dataset_name, search_times) - percent_improvement = (search_times[0]-search_times[-1])/search_times[0] - caching = percent_improvement > args.detect_caching - result['caching'] = "%d %f %f" % ( 1 if caching else 0, args.detect_caching, percent_improvement ) - if caching: - print("Possible caching discovered: %.3f > %.3f" % ( percent_improvement, args.detect_caching) ) - else: - print("No response caching detected.") - - else: - print("Warning: 'search_times' not available.") - cleaned.append(result) - dfs.append(pd.DataFrame(cleaned)) - if len(dfs) > 0: - data = pd.concat(dfs) - data.to_csv(args.output, index=False) - diff --git a/dataset_preparation/FB_ssnpp_dataset.md b/dataset_preparation/FB_ssnpp_dataset.md deleted file mode 100644 index cac2500d1..000000000 --- a/dataset_preparation/FB_ssnpp_dataset.md +++ /dev/null @@ -1,83 +0,0 @@ -# The Facebook SimSearchNet++ dataset - -SimSearchNet++ features are extracted from the images. -In production, the features are used for image copy detection for integrity purposes. -There is some detail in [this blog post](https://ai.facebook.com/blog/using-ai-to-detect-covid-19-misinformation-and-exploitative-content) - -The SSN++ features are intially in 512 dimensions, L2-normalized and in floating-point. -They are compared with a given threshold (squared L2 < 0.8) and images are deemed to match and input to further processing if the distance between images are below that threshold. - -## Data preparation - -This dataset is built form public Instagram images from a variety of countries. -The SSN++ features extracted from the images have been de-duplicated. After deduplication about 1.17B vectors remain. - -99% of the dataset is used for the database vectors. - -1% of the dataset is set apart for queries, experiments and PCA training. - -### Selecting queries - -We randomly sample 3 sets of 1M vectors each from the 1%: A, B and C. - -A are the candidate query vectors. -We compute the exact range search matches of A into database B with threshold 0.8. -This yields 124210 results, with a distance histogram that looks like: - -![](fb_ssnpp_images/distance_histogram.png) - -The leftmost peak corresponds to very near image copies. -When the threshold is relaxed, more images get matches, and the 0.8 threshold is quite arbitrary. 
-With a 1B dataset the number of matches is expected to increase to 124k * 1000 = 124M matches. - -It is useful to look at the number of results per query vector and the number of database vectors that appear to be matching with a query vector: - -![](fb_ssnpp_images/result_stats.png) - -Both plots are in log-log scale and sorted by decreasing frequencies. -This way of visualizing is useful because the data distribution is very bursty: -for example, 10 query vectors get almost 100 results and fewer than 20k vectors of the 1M queries have a result at all. - -This happens often on range search with real data because (1) most images are unique (no match) and (2) there are "meme" images that are posted many times with small variations, which creates clusters of images with many matches. - -From a benchmarking point of view, memes are not interesting because they rarely appear in search results. On the other hand, vectors with no matches are relevant for applications: the ANN algorihtm should indeed be able to not retrieve anything for them. -Therefore, we do a pass to remove the candidate queries with most matches. -We arbitrarily chose a threshold of 30 matches, which reduces the number of candidate queries to 999086 and the total number of matches to 71357. - -We keep the first and second 100k of the resulting query candidates as public query set and held-out query set. - -### Compressing the dataset - -To make the dataset less bulky, the features have been reduced to 256 dimensions by PCA (trained on subset C) and encoded in uint8. -The effect of this compression is a loss of precision w.r.t. the ground-truth matches. -This means that the comparison threshold is also adjusted to a squared L2 distance. -We select the "compressed" L2 threshold as the one that maximizes the IoU of the results wrt. the float32 results. -This happens for a squared L2 threshold of **96237**. - -![](fb_ssnpp_images/IoU.png) - -The challenge ground truth is recomputed *after* compression with this threshold. - -## Data files - -Database: - -https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/FB_ssnpp_database.u8bin - -100k public queries: - -https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/FB_ssnpp_public_queries.u8bin - -Ground truth: - -https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/FB_ssnpp_public_queries_GT.rangeres - -100k held-out queries (the XXXX is known only to the organizers): - -https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/FB_ssnpp_heldout_queries_XXXX.u8bin - -Ground truth: TODO - -## License - -The FB SSN++ dataset is licensed under the [CC-by-NC](https://creativecommons.org/licenses/by-nc/2.0/) license. 
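To make the compressed range criterion concrete, here is a small illustrative sketch (not part of the dataset tooling) of how uint8 query vectors could be range-matched against a block of database vectors using the squared-L2 threshold of 96237 selected above; the shapes and random data are made up.

```python
import numpy as np

THRESHOLD = 96237  # squared-L2 threshold chosen above for the PCA-256 / uint8 encoding

def range_matches(queries_u8, database_u8, threshold=THRESHOLD):
    """Return, for each query, the indices of database vectors within the
    squared-L2 range threshold. Vectors are uint8; compute in float32 to avoid overflow."""
    q = queries_u8.astype(np.float32)
    x = database_u8.astype(np.float32)
    # Brute-force (nq, nb) matrix of squared L2 distances; do this blockwise for real data.
    d2 = ((q[:, None, :] - x[None, :, :]) ** 2).sum(axis=-1)
    return [np.nonzero(row <= threshold)[0] for row in d2]

# Toy usage with random vectors shaped like the compressed SSN++ data (256-d uint8):
rng = np.random.default_rng(0)
xq = rng.integers(0, 256, size=(5, 256), dtype=np.uint8)
xb = rng.integers(0, 256, size=(1000, 256), dtype=np.uint8)
print([len(m) for m in range_matches(xq, xb)])
```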
diff --git a/dataset_preparation/fb_ssnpp_images/IoU.png b/dataset_preparation/fb_ssnpp_images/IoU.png deleted file mode 100644 index 76dde5fde..000000000 Binary files a/dataset_preparation/fb_ssnpp_images/IoU.png and /dev/null differ diff --git a/dataset_preparation/fb_ssnpp_images/distance_histogram.png b/dataset_preparation/fb_ssnpp_images/distance_histogram.png deleted file mode 100644 index ecd2ebc83..000000000 Binary files a/dataset_preparation/fb_ssnpp_images/distance_histogram.png and /dev/null differ diff --git a/dataset_preparation/fb_ssnpp_images/pr_compression.png b/dataset_preparation/fb_ssnpp_images/pr_compression.png deleted file mode 100644 index 7f3c286f2..000000000 Binary files a/dataset_preparation/fb_ssnpp_images/pr_compression.png and /dev/null differ diff --git a/dataset_preparation/fb_ssnpp_images/result_stats.png b/dataset_preparation/fb_ssnpp_images/result_stats.png deleted file mode 100644 index c1a3f0096..000000000 Binary files a/dataset_preparation/fb_ssnpp_images/result_stats.png and /dev/null differ diff --git a/dataset_preparation/make_groundtruth.py b/dataset_preparation/make_groundtruth.py deleted file mode 100644 index 54eae47b7..000000000 --- a/dataset_preparation/make_groundtruth.py +++ /dev/null @@ -1,288 +0,0 @@ -import argparse -import logging -import time -import resource -import pdb - -import numpy as np - -import faiss - -from faiss.contrib.exhaustive_search import range_search_gpu - -import benchmark.datasets -from benchmark.datasets import DATASETS - -""" -for dataset in deep-1B bigann-1B ssnpp-1B text2image-1B msturing-1B msspacev-1B ; do - sbatch --gres=gpu:4 --ntasks=1 --time=30:00:00 --cpus-per-task=40 \ - --partition=learnlab --mem=250g --nodes=1 \ - -J GT.100M.$dataset.d -o logs/GT.100M.$dataset.d.log \ - --wrap "PYTHONPATH=. python dataset_preparation/make_groundtruth.py \ - --dataset $dataset --split 10 0 --prepare \ - --o /checkpoint/matthijs/billion-scale-ann-benchmarks/GT_100M/${dataset/1B/100M} - " -done - -""" - - -class ResultHeap: - """Accumulate query results from a sliced dataset. The final result will - be in self.D, self.I.""" - - def __init__(self, nq, k, keep_max=False): - " nq: number of query vectors, k: number of results per query " - self.I = np.zeros((nq, k), dtype='int64') - self.D = np.zeros((nq, k), dtype='float32') - self.nq, self.k = nq, k - if keep_max: - heaps = faiss.float_minheap_array_t() - else: - heaps = faiss.float_maxheap_array_t() - heaps.k = k - heaps.nh = nq - heaps.val = faiss.swig_ptr(self.D) - heaps.ids = faiss.swig_ptr(self.I) - heaps.heapify() - self.heaps = heaps - - def add_result(self, D, I): - """D, I do not need to be in a particular order (heap or sorted)""" - assert D.shape == (self.nq, self.k) - assert I.shape == (self.nq, self.k) - self.heaps.addn_with_ids( - self.k, faiss.swig_ptr(D), - faiss.swig_ptr(I), self.k) - - def finalize(self): - self.heaps.reorder() - - -def knn_ground_truth(ds, k, bs, split): - """Computes the exact KNN search results for a dataset that possibly - does not fit in RAM but for which we have an iterator that - returns it block by block. 
- """ - print("loading queries") - xq = ds.get_queries() - - if ds.distance() == "angular": - faiss.normalize_L2(xq) - - print("knn_ground_truth queries size %s k=%d" % (xq.shape, k)) - - t0 = time.time() - nq, d = xq.shape - - metric_type = ( - faiss.METRIC_L2 if ds.distance() == "euclidean" else - faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else - 1/0 - ) - rh = ResultHeap(nq, k, keep_max=metric_type == faiss.METRIC_INNER_PRODUCT) - - index = faiss.IndexFlat(d, metric_type) - - if faiss.get_num_gpus(): - print('running on %d GPUs' % faiss.get_num_gpus()) - index = faiss.index_cpu_to_all_gpus(index) - - # compute ground-truth by blocks, and add to heaps - i0 = 0 - for xbi in ds.get_dataset_iterator(bs=bs, split=split): - ni = xbi.shape[0] - if ds.distance() == "angular": - faiss.normalize_L2(xbi) - - index.add(xbi) - D, I = index.search(xq, k) - I += i0 - rh.add_result(D, I) - index.reset() - i0 += ni - print(f"[{time.time() - t0:.2f} s] {i0} / {ds.nb} vectors", end="\r", flush=True) - - rh.finalize() - print() - print("GT time: %.3f s (%d vectors)" % (time.time() - t0, i0)) - - return rh.D, rh.I - - -def range_ground_truth(ds, radius, bs, split): - """Computes the exact range search results for a dataset that possibly - does not fit in RAM but for which we have an iterator that - returns it block by block. - """ - print("loading queries") - xq = ds.get_queries() - - if ds.distance() == "angular": - faiss.normalize_L2(xq) - - print("range_ground_truth queries size %s radius=%g" % (xq.shape, radius)) - - t0 = time.time() - nq, d = xq.shape - - metric_type = ( - faiss.METRIC_L2 if ds.distance() == "euclidean" else - faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else - 1/0 - ) - - index = faiss.IndexFlat(d, metric_type) - - if faiss.get_num_gpus(): - print('running on %d GPUs' % faiss.get_num_gpus()) - index_gpu = faiss.index_cpu_to_all_gpus(index) - else: - index_gpu = None - - results = [] - - # compute ground-truth by blocks, and add to heaps - i0 = 0 - tot_res = 0 - for xbi in ds.get_dataset_iterator(bs=bs, split=split): - ni = xbi.shape[0] - if ds.distance() == "angular": - faiss.normalize_L2(xbi) - - index.add(xbi) - if index_gpu is None: - lims, D, I = index.range_search(xq, radius) - else: - index_gpu.add(xbi) - lims, D, I = range_search_gpu(xq, radius, index_gpu, index) - index_gpu.reset() - index.reset() - I = I.astype("int32") - I += i0 - results.append((lims, D, I)) - i0 += ni - tot_res += len(D) - print(f"[{time.time() - t0:.2f} s] {i0} / {ds.nb} vectors, {tot_res} matches", - end="\r", flush=True) - print() - print("merge into single table") - # merge all results in a single table - nres = np.zeros(nq, dtype="int32") - D = [] - I = [] - for q in range(nq): - nres_q = 0 - for lims_i, Di, Ii in results: - l0, l1 = lims_i[q], lims_i[q + 1] - if l1 > l0: - nres_q += l1 - l0 - D.append(Di[l0:l1]) - I.append(Ii[l0:l1]) - nres[q] = nres_q - - D = np.hstack(D) - I = np.hstack(I) - assert len(D) == nres.sum() == len(I) - print("GT time: %.3f s (%d vectors)" % (time.time() - t0, i0)) - return nres, D, I - -def usbin_write(ids, dist, fname): - ids = np.ascontiguousarray(ids, dtype="int32") - dist = np.ascontiguousarray(dist, dtype="float32") - assert ids.shape == dist.shape - f = open(fname, "wb") - n, d = dist.shape - np.array([n, d], dtype='uint32').tofile(f) - ids.tofile(f) - dist.tofile(f) - - -def range_result_write(nres, I, D, fname): - """ write the range search file format: - int32 n_queries - int32 total_res - int32[n_queries] 
nb_results_per_query - int32[total_res] database_ids - float32[total_res] distances - """ - nres = np.ascontiguousarray(nres, dtype="int32") - I = np.ascontiguousarray(I, dtype="int32") - D = np.ascontiguousarray(D, dtype="float32") - assert I.shape == D.shape - total_res = nres.sum() - nq = len(nres) - assert I.shape == (total_res, ) - f = open(fname, "wb") - np.array([nq, total_res], dtype='uint32').tofile(f) - nres.tofile(f) - I.tofile(f) - D.tofile(f) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - def aa(*args, **kwargs): - group.add_argument(*args, **kwargs) - - group = parser.add_argument_group('dataset options') - aa('--dataset', choices=DATASETS.keys(), required=True) - aa('--prepare', default=False, action="store_true", - help="call prepare() to download the dataset before computing") - aa('--basedir', help="override basedir for dataset") - aa('--split', type=int, nargs=2, default=[1, 0], - help="split that must be handled") - - group = parser.add_argument_group('computation options') - # determined from ds - # aa('--range_search', action="store_true", help="do range search instead of kNN search") - aa('--k', default=100, type=int, help="number of nearest kNN neighbors to search") - aa('--radius', default=96237, type=float, help="range search radius") - aa('--bs', default=100_000, type=int, help="batch size for database iterator") - aa("--maxRAM", default=100, type=int, help="set max RSS in GB (avoid OOM crash)") - - group = parser.add_argument_group('output options') - aa('--o', default="", help="output file name") - - args = parser.parse_args() - - print("args:", args) - - if args.basedir: - print("setting datasets basedir to", args.basedir) - benchmark.datasets.BASEDIR - benchmark.datasets.BASEDIR = args.basedir - - if args.maxRAM > 0: - print("setting max RSS to", args.maxRAM, "GiB") - resource.setrlimit( - resource.RLIMIT_DATA, (args.maxRAM * 1024 ** 3, resource.RLIM_INFINITY) - ) - - ds = DATASETS[args.dataset]() - - print(ds) - - if args.prepare: - print("downloading dataset...") - ds.prepare() - print("dataset ready") - - if False: # args.crop_nb != -1: - print("cropping dataset to", args.crop_nb) - ds.nb = args.crop_nb - print("new ds:", ds) - - - if ds.search_type() == "knn": - D, I = knn_ground_truth(ds, k=args.k, bs=args.bs, split=args.split) - print(f"writing index matrix of size {I.shape} to {args.o}") - # write in the usbin format - usbin_write(I, D, args.o) - elif ds.search_type() == "range": - nres, D, I = range_ground_truth(ds, radius=args.radius, bs=args.bs, split=args.split) - print(f"writing results {I.shape} to {args.o}") - range_result_write(nres, I, D, args.o) - - diff --git a/dataset_preparation/prepare_bigann.py b/dataset_preparation/prepare_bigann.py deleted file mode 100644 index 9c92a1087..000000000 --- a/dataset_preparation/prepare_bigann.py +++ /dev/null @@ -1,88 +0,0 @@ - -""" -Prepare the bigann dataset in the format expected for the 1B ANN competition - -""" - -import sys - -from faiss.contrib import datasets as faiss_datasets -import numpy as np - - -# source data is in the native Faiss format -ds = faiss_datasets.DatasetBigANN() - -stage = int(sys.argv[1]) - -outdir = "/scratch/matthijs/bigann_competiton_format/" - -def u8bin_write(x, fname): - assert x.dtype == 'uint8' - f = open(fname, "wb") - n, d = x.shape - np.array([n, d], dtype='uint32').tofile(f) - x.tofile(f) - -def ibin_write(x, fname): - assert x.dtype == 'int32' - f = open(fname, "wb") - n, d = x.shape - np.array([n, d], dtype='uint32').tofile(f) - 
x.tofile(f) - - -if stage == 1: # convert query format - # xq = ds.get_queries() - xq = faiss_datasets.bvecs_mmap(ds.basedir + 'bigann_query.bvecs') - xq = np.ascontiguousarray(xq) - u8bin_write(xq, outdir + "query.public.10K.u8bin") - -elif stage == 2: # sample new queries from train set - secretkey = int(sys.argv[2]) - rs = np.random.RandomState(secretkey) - xt = faiss_datasets.bvecs_mmap(ds.basedir + 'bigann_learn.bvecs') - print("size", xt.shape) - selection = rs.choice(len(xt), 10000, replace=False) - u8bin_write(xt[selection], outdir + f"query.private.{secretkey}.10K.u8bin") - -elif stage == 3: # convert 10M subset - - xb = faiss_datasets.bvecs_mmap(ds.basedir + 'bigann_base.bvecs') - u8bin_write(xb[:10**7], outdir + "base.10M.u8bin") - -elif stage == 4: # write the 1B vectors... - - xb = faiss_datasets.bvecs_mmap(ds.basedir + 'bigann_base.bvecs') - bs = 10**6 - f = open(outdir + "base.1B.u8bin", "wb") - np.array(xb.shape, dtype='uint32').tofile(f) - for i in range(1000): - print(i, end="\r", flush=True) - xb[i * bs : (i + 1) * bs].tofile(f) - -elif stage == 5: # convert the training vectors - - xb = faiss_datasets.bvecs_mmap(ds.basedir + 'bigann_learn.bvecs') - bs = 10**6 - f = open(outdir + "learn.100M.u8bin", "wb") - np.array(xb.shape, dtype='uint32').tofile(f) - for i in range(100): - print(i, end="\r", flush=True) - xb[i * bs : (i + 1) * bs].tofile(f) - -elif stage == 6: - # convert ground-truth files for public queries - gt = ds.get_groundtruth() - ibin_write(gt, outdir + "GT.public.1B.ibin") - - ds10M = faiss_datasets.DatasetBigANN(10) - gt = ds.get_groundtruth() - ibin_write(gt, outdir + "GT.public.10M.ibin") - - - - - - - diff --git a/dataset_preparation/prepare_fb_ssnpp.py b/dataset_preparation/prepare_fb_ssnpp.py deleted file mode 100644 index 59c5fcd06..000000000 --- a/dataset_preparation/prepare_fb_ssnpp.py +++ /dev/null @@ -1,39 +0,0 @@ - -""" -Prepare the FB SSN++ dataset in the format expected for the 1B ANN competition - -The datafiles have already been produced on the prod side: - -- FB_ssnpp_database.u8bin: the 1B database vectors, deduplicated, already - in correct format - -- 1M_queries_no_bursts_compressed.npy: a little less than 1M query vectors, - selected not to be bursty - -""" -import sys -import numpy as np - -secret_suffix = sys.argv[1] - -basedir = "/checkpoint/matthijs/billion-scale-ann-benchmarks/FB_ssnpp/" - -def u8bin_write(x, fname): - assert x.dtype == 'uint8' - f = open(fname, "wb") - n, d = x.shape - np.array([n, d], dtype='uint32').tofile(f) - x.tofile(f) - -xqall_fp32 = np.load(basedir + "1M_queries_no_bursts_compressed.npy") -xqall = xqall_fp32.astype('uint8') -assert np.all(xqall == xqall_fp32) -u8bin_write( - xqall[:10**5], - basedir + "FB_ssnpp_public_queries.u8bin" -) -u8bin_write( - xqall[10**5: 2 * 10**5], - basedir + "FB_ssnpp_heldout_queries_" + secret_suffix + ".u8bin" -) - diff --git a/discord-logo-white.svg b/discord-logo-white.svg new file mode 100644 index 000000000..d81da09a5 --- /dev/null +++ b/discord-logo-white.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index ba195f130..f7b2942da 100644 --- a/docs/index.html +++ b/docs/index.html @@ -1,556 +1,556 @@ - - - - - - - - - Big ANN Benchmarks - - - - - - - - - - - - - - - -
-

Billion-Scale Approximate Nearest Neighbor Search Challenge: NeurIPS'21 competition track

- -

-
- - - - Evaluation framework on github. Subscribe to our - Medium channel - for discussions and announcements. - - - - -
-

NeurIPS'21 leaderboard and session schedule

- -

- Track T1/T2 leaderboard.
- Track T3 leaderboard. -

- - The NeurIPS session for this competition took place on Dec 8. - Find slides of the talks and videos below.
-
    -
  • 11:05-11:25: Overview Talk (slides)
  • -
  • 12:00-12:45: Overview of results presented by organizers, followed by Q&A (video)
  • -
      -
    • Standard hardware tracks T1 and T2 results (slides)
    • -
    • Custom hardware track T3 results (slides)
    • -
    -
  • 12:45-13:20: Invited talk 1 by Prof. Alexandr Andoni: Learning to Hash Robustly, with Guarantees (slides, video)
  • -
  • 13:20-13:55: Invited talk 2 by Prof. Anshumali Shrivastava: Iterative Repartitioning for Learning to Hash and the Power of k-Choices (slides, video)
  • -
  • 13:55-14:30: Talks from track winners. -
      -
    • Track 1: kst_ann_t1 Li Liu, Jin Yu, Guohao Dai, Wei Wu, Yu Qiao, Yu Wang, Lingzhi Liu, Kuaishou Technology and Tsinghua University (slides, video)
    • -
    • Track 2: BBANN Xiaomeng Yi, Xiaofan Luan, Weizhi Xu, Qianya Cheng, Jigao Luo, Xiangyu Wang, Jiquan Long, Xiao Yan, Zheng Bian, Jiarui Luo, Shengjun Li, Chengming Li, Zilliz and Southern University of Science and Technology (slides, video)
    • -
    • Track 3: OptaNNe Sourabh Dongaonkar, Mark Hildebrand, Mariano Tepper, Cecilia Aguerrebere, Ted Willke, Jawad Khan, Intel Corporation, Intel Labs and UC Davis (slides, video)
    • -
    -
  • -
  • 14:30-15:00: Open discussion on competition and future directions (github thread, video)
  • -
- Additional material: Overview Talk. -

- - - -

- Abstract for Invited talk: "Learning to Hash Robustly, with Guarantees"
- There is a gap between the high-dimensional nearest neighbor search - (NNS) algorithms achieving the best worst-case guarantees and the - top-performing ones in practice. The former are based on indexing via - the randomized Locality Sensitive Hashing (LSH), and its - derivatives. The latter "learn" the best indexing method in order to - speed-up NNS, crucially adapting to the structure of the given - dataset. Alas, the latter also almost always come at the cost of - losing the guarantees of either correctness or robust performance on - adversarial queries (or apply to datasets with an assumed extra - structure/model). - - How can we bridge these two perspectives and bring the best of both - worlds? As a step in this direction, we will talk about an NNS algorithm - that has worst-case guarantees essentially matching that of - theoretical algorithms, while optimizing the hashing to the structure - of the dataset (think instance-optimal algorithms) for performance on - the minimum-performing query. We will discuss the algorithm's ability - to optimize for a given dataset from both theoretical and practical - perspective. -

- -

- Abstract for Invited talk: "Iterative Repartitioning for Learning to Hash and the Power of k-Choices"
- Dense embedding models are commonly deployed in commercial - search engines, wherein all the vectors are pre-computed, and - near-neighbor search (NNS) is performed with the query vector to find - relevant documents. However, the bottleneck of indexing a large number - of dense vectors and performing an NNS hurts the query time and - accuracy of these models. In this talk, we argue that high-dimensional - and ultra-sparse embedding is a significantly superior alternative to - dense low-dimensional embedding for both query efficiency and - accuracy. Extreme sparsity eliminates the need for NNS by replacing - them with simple lookups, while its high dimensionality ensures that - the embeddings are informative even when sparse. However, learning - extremely high dimensional embeddings leads to blow-up in the model - size. To make the training feasible, we propose a partitioning - algorithm that learns such high-dimensional embeddings across multiple - GPUs without any communication. We theoretically prove that our way of - one-sided learning is equivalent to learning both query and label - embeddings. We call our novel system designed on sparse embeddings as - IRLI (pronounced `early'), which iteratively partitions the items by - learning the relevant buckets directly from the query-item relevance - data. Furthermore, IRLI employs a superior power-of-k-choices based - load balancing strategy. We mathematically show that IRLI retrieves - the correct item with high probability under very natural assumptions - and provides superior load balancing. IRLI surpasses the best - baseline's precision on multi-label classification while being 5x - faster on inference. For near-neighbor search tasks, the same method - outperforms the state-of-the-art Learned Hashing approach NeuralLSH by - requiring only ~ {1/6}^th of the candidates for the same recall. IRLI - is both data and model parallel, making it ideal for distributed GPU - implementation. We demonstrate this advantage by indexing 100 million - dense vectors and surpassing the popular FAISS library by >10%. -

-
- -
-

Why this competition?

- In the past few years, we’ve seen a lot of new research and creative approaches for large-scale ANNS, including: -
    -
  • Partition-based and graph-based indexing strategies (as well as hybrid indexing approaches).
  • -
  • Mixing RAM and SSD storage to efficiently store and process large datasets that exceed the size of RAM.
  • -
  • Using accelerator hardware such as GPUs, FPGAs, and other custom in-memory silicon.
  • -
  • Leveraging machine learning for dimensionality reduction of the original vectors.
  • -
-

- In addition to an uptick in academic interest, many implementations of these algorithms at scale now appear in production - and high-availability datacenter contexts: powering enterprise-grade, mission-critical, and web-scale search applications. - In these deployment scenarios, benchmarks such as cost, preprocessing time, and power consumption become just as important as - the recall-vs-latency tradeoff. Despite this, most empirical evaluations of algorithms have focused on smaller datasets - of about a million points, e.g. ann-benchmarks.com. However, deploying recent algorithmic advances in ANNS techniques for - search, recommendation, and ranking at scale requires support at billion-point or substantially larger scale. Barring a few recent - papers, there is limited consensus on which algorithms are effective at this scale. -

- - We believe that this challenge will be impactful in several ways: -
    -
  • Provide a comparative understanding of algorithmic ideas and their application at scale.
  • -
  • Promote the development of new techniques for the problem and demonstration of their value.
  • -
  • Provide a compilation of datasets, many new, to enable future development of algorithms.
  • -
  • Introduce a standard benchmarking approach.
  • -
- By providing a platform for those interested in this problem, we aim to encourage more collaboration and collectively advance the field at a more rapid pace. - Researchers can request Azure compute credit from a pool sponsored by Microsoft Research. -
- -
-

Tracks

-

Standard Hardware Tracks (T1 and T2)

-

- There are two standard hardware tracks: -

    -
  • Track 1: In-memory indices with FAISS as the baseline. - Search would use Azure Standard_F32s_v2 VMs - with 32 vCPUs and 64GB RAM. Index construction would use Azure - Standard_F64s_v2 VM - with 64 vCPUs, 128GB RAM, and an additional 4TB of SSD to be used for storing the data, index, and other intermediate data.
  • -
  • Track 2: Out-of-core indices with DiskANN as the baseline. - In addition to the limited DRAM in T1, the index can use an SSD for search. - Search would use Azure - Standard_L8s_v2 VMs with 8 vCPUs, 64GB RAM, and a local SSD index constrained to 1TB. - Construction would use Azure - Standard_F64s_v2 VM - with 64 vCPUs, 128GB RAM, and an additional 4TB of SSD to be used for storing the data, index, and other intermediate data.
  • -
- Participants are expected to release their code for index building and search, which the organizers will run on separate machines. - Participants provide a configuration for their index build code that would complete in 4 days for each dataset. - The protocol for evaluation is as follows: -
    -
  • [on indexing machine] participants will be given a local path with 1B vector dataset.
  • -
  • [on indexing machine] participants build an index from the 1B vectors and store back to local disk.
  • -
  • [on indexing machine] Stored index is copied out to a temporary cloud storage location by the eval framework.
  • -
  • [on search machine] organizers load the index from cloud storage to a local path and provide the path to the search code.
  • -
  • [on search machine] organizers perform searches with held-out query set and measure recall and time to process the queries with several sets of parameters.
  • -
-

- - Finalized details for build and search hardware timing will be released along with the eval framework. -

Custom Hardware Track (T3)

-

- Participants can use non-standard hardware such as GPUs, AI accelerators, FPGAs, and custom in-memory silicon. - In this track, participants will either 1) send their hardware, such as PCI boards, to GSI Technology, or 2) run the evaluation - themselves using the scripts made available by the organizers. For T3 participants sending hardware, - we will make specific delivery arrangements at the participant's expense. We will install the hardware on a system under - the organizers' control (we have a few bare-metal options available) and follow any installation directions provided. - Participants will be allowed to temporarily log into the machine to finalize any installation and configuration, - or for debugging the installation as needed. For T3 participants running the evaluation themselves, we request remote SSH - access and sudo accounts on the systems so that the organizers can verify the system and hardware (such as IPMI support and - minimum resource availability such as disk storage for datasets). - - The evaluation phase will proceed like T1/T2, with a few modifications. -

    -
  • For participants that send their hardware, T3 organizers will provide remote access to a separate indexing machine. -
      -
    • [on separate indexing machine] participants download 1B vector dataset and store to local disk
    • -
    • [on separate indexing machine] participants build an index from the 1B vectors and store back to local disk
    • -
    • Stored index is copied to eval machine
    • -
    • [on eval machine] T3 organizers load the index from local disk
    • -
    • [on eval machine] T3 organizers provide index with held-out query set and measure recall and time to process the queries with several sets of parameters. - Index search code can use internal parallelism to batch process the queries.
    • -
    -
  • -
  • For participants that give us remote access to systems, participants are responsible for building their index. -
      -
    • [on indexing machine] participants download 1B vector dataset and store to local disk
    • -
    • [on indexing machine] participants build an index from the 1B vectors and store back to local disk
    • -
    • Stored index is copied to eval machine
    • -
    • [on eval machine] T3 organizers load the index from local disk
    • -
    • [on eval machine] T3 organizers perform searches with held-out query set and measure recall and search time with several sets of parameters.
    • -
    -
  • -
- - T3 will maintain different leaderboards for each dataset based on the following benchmarks: -
    -
  • Recall vs throughput using the same ranking formula as the T1/T2 track
  • -
  • Power: recall vs throughput/watt, with a ranking formula similar to the T1/T2 track.
  • -
  • Cost, measured per watt (queries/second/watt and MSRP/watt)
  • -
  • Total cost normalized across all tracks.
  • -
- We will provide the exact details on how we collect and compute these benchmarks as well as additional machine and operating system specification before the competition begins. -
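As a rough, purely illustrative sketch of how these power and cost figures relate to one another (the official collection procedure is the one referred to above; every number below is an assumption, not a measurement):

```python
# Illustrative arithmetic only; all inputs are assumed values.
total_queries = 10_000 * 600      # e.g. 10K queries/s sustained for a 600 s run
elapsed_seconds = 600.0
avg_watts = 350.0                 # assumed average wall power during the run
msrp_dollars = 15_000.0           # assumed retail price (MSRP) of the system

qps = total_queries / elapsed_seconds
throughput_per_watt = qps / avg_watts                                   # queries/second/watt
watt_seconds_per_query = (avg_watts * elapsed_seconds) / total_queries  # the reciprocal of the above
msrp_per_watt = msrp_dollars / avg_watts

print(qps, throughput_per_watt, watt_seconds_per_query, msrp_per_watt)
```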

-
- -
-

Benchmark Datasets

- We intend to use the following 6 billion-point datasets. -
    -
  • BIGANN consists of SIFT descriptors applied to images extracted from a large image dataset.
  • -
  • Facebook SimSearchNet++ is a new dataset released by Facebook for this competition. - It consists of features used for image copy detection for integrity purposes. - The features are generated by the Facebook SimSearchNet++ model.
  • -
  • Microsoft Turing-ANNS-1B is a new dataset being released by the Microsoft Turing team for this competition. - It consists of Bing queries encoded by Turing AGI v5 that trains Transformers to capture similarity of intent in - web search queries. An early version of the RNN-based AGI Encoder is described in a - SIGIR'19 paper and a blogpost.
  • -
  • Microsoft SPACEV-1B is a new web-search-related dataset - released by Microsoft Bing for this competition. - It consists of document and query vectors encoded by the Microsoft SpaceV Superior model to capture generic intent representation.
  • -
  • Yandex DEEP-1B is an image descriptor dataset consisting of the projected - and normalized outputs from the last fully-connected layer of the GoogLeNet model, which was pretrained on the Imagenet classification task.
  • -
  • Yandex Text-to-Image-1B is a new cross-modal dataset (text and visual), - where database and query vectors have different distributions in a shared representation space. The base set consists of image embeddings produced by the - Se-ResNext-101 model, and queries are textual embeddings produced by a variant of the DSSM model. Since the distributions are different, a 50M sample - of the query distribution is provided.
  • -
- -

- All datasets are in the common binary format that starts with 8 bytes of data consisting of num_points(uint32_t) - num_dimensions(uint32) followed by num_pts X num_dimensions x sizeof(type) bytes of data stored one vector after another. Data files - will have suffixes .fbin, .u8bin, and .i8bin to represent float32, uint8 and int8 type data. Note that a different query set - will be used for evaluation. The details of the datasets along with links to the base, query and sample sets, and the ground truth nearest neighbors - of the query set are listed below. -
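For illustration, a minimal reader for this binary layout might look like the sketch below; it is not the framework's own loader (the benchmark code ships its own dataset readers), and the path in the usage comment is only an example.

```python
import numpy as np

def read_vectors(path):
    """Read a .fbin / .u8bin / .i8bin file: uint32 num_points, uint32 num_dimensions,
    then num_points * num_dimensions values of the corresponding element type."""
    dtype = {".fbin": np.float32, ".u8bin": np.uint8, ".i8bin": np.int8}[path[path.rfind("."):]]
    with open(path, "rb") as f:
        n, d = np.fromfile(f, dtype=np.uint32, count=2)
        return np.fromfile(f, dtype=dtype, count=int(n) * int(d)).reshape(int(n), int(d))

# Example (hypothetical local path): xq = read_vectors("data/query.public.10K.u8bin")
```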

- -

- The ground truth binary files for k-NN search consist of the following information: num_queries(uint32_t) - K-NN(uint32) followed by num_queries X K x sizeof(uint32_t) bytes of data representing the IDs of the K-nearest neighbors of the - queries, followed by num_queries X K x sizeof(float) bytes of data representing the distances to the corresponding points. The distances - help identify neighbors tied in terms of distances. In recall calculation, returning a neighbor not in the ground truth set but whose distance is tied - with an entry in the ground truth is counted as success. -
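A sketch of reading this ground-truth layout and applying the tie-aware recall rule described above; this is illustrative only, the framework's metrics code implements the official computation.

```python
import numpy as np

def read_knn_groundtruth(path):
    """uint32 num_queries, uint32 K, then num_queries*K uint32 IDs,
    then num_queries*K float32 distances."""
    with open(path, "rb") as f:
        nq, k = np.fromfile(f, dtype=np.uint32, count=2)
        ids = np.fromfile(f, dtype=np.uint32, count=int(nq) * int(k)).reshape(int(nq), int(k))
        dist = np.fromfile(f, dtype=np.float32, count=int(nq) * int(k)).reshape(int(nq), int(k))
    return ids, dist

def recall_at_k(gt_ids, gt_dist, run_ids, k=10, eps=1e-6):
    """Count a returned neighbor as correct if it appears among the ground-truth
    entries whose distance is within the k-th ground-truth distance (plus a tie epsilon)."""
    total = 0
    for q in range(len(run_ids)):
        threshold = gt_dist[q, k - 1] + eps
        allowed = set(gt_ids[q, gt_dist[q] <= threshold])
        total += len(allowed.intersection(run_ids[q][:k]))
    return total / (len(run_ids) * k)
```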

-

- The ground truth binary files for range search consist of the following information: num_queries(int32_t) followed by the total number - of results total_res(int32_t) followed - by num_queries X size(int32_t) bytes corresponding to num_results_per_query for each query, followed by total_res X sizeof(int32_t) - bytes corresponding to the IDs of the neighbors of each query one after the other. -
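The corresponding reader for the range-search layout might look as follows (a sketch; the matching writer appears in dataset_preparation/make_groundtruth.py above, which additionally stores float32 distances after the IDs):

```python
import numpy as np

def read_range_groundtruth(path):
    """int32 num_queries, int32 total_res, then num_queries int32 result counts,
    then total_res int32 neighbor IDs laid out query after query."""
    with open(path, "rb") as f:
        nq, total_res = np.fromfile(f, dtype=np.int32, count=2)
        nres = np.fromfile(f, dtype=np.int32, count=int(nq))
        ids = np.fromfile(f, dtype=np.int32, count=int(total_res))
    # Prefix sums turn per-query counts into slice boundaries.
    lims = np.concatenate([[0], np.cumsum(nres, dtype=np.int64)])
    # Neighbors of query q are ids[lims[q]:lims[q + 1]].
    return lims, ids
```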

-

- The ground truth files for the first 10M slice, the first 100M slice, and the complete 1B set of each dataset against the respective query set can be downloaded - here(10M), - here(100M), and - here(1B). -

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Dataset Datatype Dimensions Distance Range/k-NN Base data Sample data Query data Ground truth Release terms
BIGANN uint8 128 L2 k-NN 1B points 100M base points 10K queries link CC0
Facebook SimSearchNet++* uint8 256 L2 Range (squared distance: 96237) 1B points N/A 100k queries link CC BY-NC
Microsoft Turing-ANNS* float32 100 L2 k-NN 1B points N/A 100K queries link link to terms
Microsoft SPACEV* int8 100 L2 k-NN 1B points 100M base points 29.3K queries link O-UDA
Yandex DEEP float32 96 L2 k-NN 1B points 350M base points 10K queries link CC BY 4.0
Yandex Text-to-Image* float32 200 inner-product k-NN 1B points 50M queries 100K queries link CC BY 4.0
- * new datasets
- We recommend using Axel for downloading BIGANN, Facebook-SSN++, Yandex DEEP1B and T2I datasets.
- We recommend using AzCopy for downloading Microsoft datasets. -
- -
-

Metrics

- The competition will measure recall@10 of the algorithms on the 6 datasets using a private (unreleased) query set at a fixed query throughput. - Track T1 measures recall of algorithms at 10000 queries/second (on 32 vCPUs), T2 measures recall at 1500 queries/second, and T3 measures recall at 2000 queries/second. - The primary metric for comparison in each track will be the sum of improvements in recall over the baseline at the target QPS over all datasets. - Additionally, track T3 will also rank entries by power and cost per query. See this - notebook - for power and cost analysis. - A team has to publish an algorithm and commit to benchmarking on at least 3 datasets to be considered for ranking. Recall regression on a dataset selected - by a team will be counted as a negative score. - The recall@10 (AP for the SSN++-1B dataset) of the baseline algorithms on each dataset for the public query set is listed below. -
Track Algorithm Search Machine Target Queries/sec BIGANN-1B SSN++-1B Turing-ANNS-1B SPACEV-1B DEEP-1B Text-to-Image-1B
Track 1 FAISS-CPU Azure F32s_v2 32 vCPUs + 64GB RAM 10000 0.634 0.753 0.703 0.728 0.650 0.069
Track 2 DiskANN Azure L8s_v2 8vCPUs + 64GB RAM + 1TB SSD 1500 0.949 0.16274 0.936 0.901 0.937 0.488
Track 3 FAISS-GPU NVIDIA V100 + 700GB RAM 2000 0.927 TBA 0.910 0.850 0.942 0.86
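To illustrate the ranking formula (sum of recall improvements over the baseline at the target QPS), here is a toy computation against the Track 1 baseline row above; the submission recalls are invented.

```python
# T1 baseline recall@10 at 10000 QPS, taken from the table above.
baseline_t1 = {"BIGANN-1B": 0.634, "SSN++-1B": 0.753, "Turing-ANNS-1B": 0.703,
               "SPACEV-1B": 0.728, "DEEP-1B": 0.650, "Text-to-Image-1B": 0.069}

# Hypothetical submission benchmarked on three datasets (the minimum required for ranking).
submission = {"BIGANN-1B": 0.71, "DEEP-1B": 0.69, "Text-to-Image-1B": 0.05}

# Primary metric: sum of (recall - baseline recall) over the benchmarked datasets;
# a regression (here on Text-to-Image-1B) contributes negatively.
score = sum(recall - baseline_t1[name] for name, recall in submission.items())
print(round(score, 3))  # 0.076 + 0.040 - 0.019 = 0.097
```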
-
- -
- Baseline DiskANN indices for T2 can be downloaded using "azcopy copy 'https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices' 'local_folder' --recursive". - Note that this would take some time as the indices are large. All indices were built using R and L parameters set to 100. - Search for T2 used 16 threads and beamwidth 4. The Ls parameter was varied to tune recall vs QPS.
- Update: T2 baseline results have been modified after measuring via pybind11 interface on docker. There was a 30-40% QPS loss using this interface - as compared to direct measurements of C++ code from commandline. As a result, the QPS target has now been lowered, and the recall is reported at this threshold. - - -
-

Call for Participation and Timeline

-

- Participation is open to all teams interested in developing new algorithms or re-implementing - existing algorithms more efficiently either in software or hardware. Participants are - requested to submit a brief document through CMT - for each track they will be competing in. The document should contain the following details: -

    -
  • Name, email and affiliation of each participant in the team
  • -
  • A name and/or URL for the submission.
  • -
  • [Optional] To receive Azure credits for developing new ideas, please submit your request - by June 30th with preliminary data on smaller scale datasets and why you think - your algorithm will work well at billion scale. This will be used by the organizers to select strong - entries. We request teams who already have access to infrastructure (e.g. those from industry or - with access to large university clusters) to skip this.
  • -
- - - For Track T3, the document should contain the following additional details to help organizers plan - and assess eligibility for separate leaderboards: -
    -
  • Type of hardware, e.g., PCIe extension board, rack-mounted system, or other.
  • -
  • Evidence of the retail MSRP of the hardware, i.e., pricing on website or copy of the customer invoice.
  • -
  • Whether hardware will be sent to GSI Technology (at the participants' expense) or the organizers will be given remote access to the systems. - For remote system access participants, whether their system supports standard IPMI power monitoring. - If not IPMI, then an equivalent power monitoring interface must be available. -
  • Operating system requirements.
  • -
  • Whether the participant requires a separate machine for index building. We have limited Azure-based - Fsv2-series machines and some bare-metal machines managed by the T3 organizers.
  • -
-

- -

Consent Forms

- Please review and complete the consent form for participation in Tracks T1/T2 - and Track T3. Note that there are separate consent forms - for the standard and custom hardware tracks. Completing the form is necessary for participation. - -

-

Timeline (subject to change)

-
    -
  • May: release of data, guidelines, and a call for participation. Registration open.
  • -
  • June: Baseline results, testing infrastructure and final ranking metrics released.
  • -
  • July 11th: Participants in need of compute resources to submit an expression of interest.
  • -
  • Mid-July: Allocation of compute resources.
  • -
  • July 30th: Final deadline for participants to submit an expression of interest through CMT.
  • -
  • October 22nd: End of competition period. Teams to release code in containerized form and complete a pull request to the eval framework with code to run the algorithms.
  • -
  • October 29th: Participants submit a brief report outlining their algorithm and results.
  • -
  • Mid-November: Release of preliminary results on standardized machines. Review of code by organizers and participants. Participants can raise concerns about the evaluation.
  • -
  • Early December: Final results published, and competition results archived (the competition will go on if interest continues).
  • -
  • During NeurIPS, organizers will provide an overview of the competition and results. Organizers will also request the best entries - (including leaderboard toppers, or promising new approaches) to present an overview for further discussion.
  • -
-

-
- - -
- + + + + + + + + + Big ANN Benchmarks + + + + + + + + + + + + + + + +
+

Billion-Scale Approximate Nearest Neighbor Search Challenge: NeurIPS'21 competition track

+ +

+
+ + + + Evaluation framework on github. Subscribe to our + Medium channel + for discussions and announcements. + + + + +
+

NeurIPS'21 leaderboard and session schedule

+ +

+ Track T1/T2 leaderboard.
+ Track T3 leaderboard. +

+ + The NeurIPS session for this competition took place on Dec 8. + Find slides of the talks and videos below.
+
    +
  • 11:05-11:25: Overview Talk (slides)
  • +
  • 12:00-12:45: Overview of results presented by organizers, followed by Q&A (video)
  • +
      +
    • Standard hardware tracks T1 and T2 results (slides)
    • +
    • Custom hardware track T3 results (slides)
    • +
    +
  • 12:45-13:20: Invited talk 1 by Prof. Alexandr Andoni: Learning to Hash Robustly, with Guarantees (slides, video)
  • +
  • 13:20-13:55: Invited talk 2 by Prof. Anshumali Shrivastava: Iterative Repartitioning for Learning to Hash and the Power of k-Choices (slides, video)
  • +
  • 13:55-14:30: Talks from track winners. +
      +
    • Track 1: kst_ann_t1 Li Liu, Jin Yu, Guohao Dai, Wei Wu, Yu Qiao, Yu Wang, Lingzhi Liu, Kuaishou Technology and Tsinghua University (slides, video)
    • +
    • Track 2: BBANN Xiaomeng Yi, Xiaofan Luan, Weizhi Xu, Qianya Cheng, Jigao Luo, Xiangyu Wang, Jiquan Long, Xiao Yan, Zheng Bian, Jiarui Luo, Shengjun Li, Chengming Li, Zilliz and Southern University of Science and Technology (slides, video)
    • +
    • Track 3: OptaNNe Sourabh Dongaonkar, Mark Hildebrand, Mariano Tepper, Cecilia Aguerrebere, Ted Willke, Jawad Khan, Intel Corporation, Intel Labs and UC Davis (slides, video)
    • +
    +
  • +
  • 14:30-15:00: Open discussion on competition and future directions (github thread, video)
  • +
+ Additional material: Overview Talk.

+ + + +

+ Abstract for Invited talk: "Learning to Hash Robustly, with Guarantees"
+ There is a gap between the high-dimensional nearest neighbor search + (NNS) algorithms achieving the best worst-case guarantees and the + top-performing ones in practice. The former are based on indexing via + the randomized Locality Sensitive Hashing (LSH), and its + derivatives. The latter "learn" the best indexing method in order to + speed-up NNS, crucially adapting to the structure of the given + dataset. Alas, the latter also almost always come at the cost of + losing the guarantees of either correctness or robust performance on + adversarial queries (or apply to datasets with an assumed extra + structure/model). + + How can we bridge these two perspectives and bring the best of both + worlds? As a step in this direction, we will talk about an NNS algorithm + that has worst-case guarantees essentially matching that of + theoretical algorithms, while optimizing the hashing to the structure + of the dataset (think instance-optimal algorithms) for performance on + the minimum-performing query. We will discuss the algorithm's ability + to optimize for a given dataset from both theoretical and practical + perspective. +

+ +

+ Abstract for Invited talk: "Iterative Repartitioning for Learning to Hash and the Power of k-Choices"
+ Dense embedding models are commonly deployed in commercial + search engines, wherein all the vectors are pre-computed, and + near-neighbor search (NNS) is performed with the query vector to find + relevant documents. However, the bottleneck of indexing a large number + of dense vectors and performing an NNS hurts the query time and + accuracy of these models. In this talk, we argue that high-dimensional + and ultra-sparse embedding is a significantly superior alternative to + dense low-dimensional embedding for both query efficiency and + accuracy. Extreme sparsity eliminates the need for NNS by replacing + them with simple lookups, while its high dimensionality ensures that + the embeddings are informative even when sparse. However, learning + extremely high dimensional embeddings leads to blow-up in the model + size. To make the training feasible, we propose a partitioning + algorithm that learns such high-dimensional embeddings across multiple + GPUs without any communication. We theoretically prove that our way of + one-sided learning is equivalent to learning both query and label + embeddings. We call our novel system designed on sparse embeddings as + IRLI (pronounced `early'), which iteratively partitions the items by + learning the relevant buckets directly from the query-item relevance + data. Furthermore, IRLI employs a superior power-of-k-choices based + load balancing strategy. We mathematically show that IRLI retrieves + the correct item with high probability under very natural assumptions + and provides superior load balancing. IRLI surpasses the best + baseline's precision on multi-label classification while being 5x + faster on inference. For near-neighbor search tasks, the same method + outperforms the state-of-the-art Learned Hashing approach NeuralLSH by + requiring only ~ {1/6}^th of the candidates for the same recall. IRLI + is both data and model parallel, making it ideal for distributed GPU + implementation. We demonstrate this advantage by indexing 100 million + dense vectors and surpassing the popular FAISS library by >10%. +

+
+ +
+

Why this competition?

+ In the past few years, we’ve seen a lot of new research and creative approaches for large-scale ANNS, including: +
    +
  • Partition-based, and graph-based indexing strategies (as well as hybrid indexing approaches).
  • +
  • Mixing RAM and SSD storage to efficiently store and process large datasets that exceed the size of RAM.
  • +
  • Using accelerator hardware such as GPUs, FPGAs, and other custom in-memory silicon.
  • +
  • Leveraging machine learning for dimensionality reduction of the original vectors.
  • +
+

+ In addition to an uptick in academic interest, many implementations of these algorithms at scale now appear in production and high-availability datacenter contexts, powering enterprise-grade, mission-critical, and web-scale search applications. In these deployment scenarios, benchmarks such as cost, preprocessing time, and power consumption become just as important as the recall-vs-latency tradeoff. Despite this, most empirical evaluations of algorithms have focused on smaller datasets of about a million points, e.g. ann-benchmarks.com. However, deploying recent algorithmic advances in ANNS techniques for search, recommendation, and ranking at scale requires support at billion-point or substantially larger scale. Barring a few recent papers, there is limited consensus on which algorithms are effective at this scale.

+ + We believe that this challenge will be impactful in several ways: +
    +
  • Provide a comparative understanding of algorithmic ideas and their application at scale.
  • +
  • Promote the development of new techniques for the problem and demonstration of their value.
  • +
  • Provide a compilation of datasets, many new, to enable future development of algorithms.
  • +
  • Introduce a standard benchmarking approach.
  • +
+ By providing a platform for those interested in this problem, we aim to encourage more collaboration and collectively advance the field at a more rapid pace. Researchers can request Azure compute credit from a pool sponsored by Microsoft Research.
+ +
+

Tracks

+

Standard Hardware Tracks (T1 and T2)

+

+ There are two standard hardware tracks: +

    +
  • Track 1: In-memory indices with FAISS as the baseline. + Search would use Azure Standard_F32s_v2 VMs + with 32 vCPUs and 64GB RAM. Index construction would use Azure + Standard_F64s_v2 VM + with 64vCPUs, 128GB RAM and an additional 4TB of SSD to be used for storing the data, index and other intermediate data.
  • +
  • Track 2: Out-of-core indices with DiskANN as the baseline. In addition to the limited DRAM in T1, the index can use an SSD for search. Search would use Azure Standard_L8s_v2 VMs with 8 vCPUs, 64GB RAM, and a local SSD; the index is constrained to 1TB. Construction would use an Azure Standard_F64s_v2 VM with 64 vCPUs, 128GB RAM, and an additional 4TB of SSD for storing the data, index, and other intermediate data.
  • +
+ Participants are expected to release their code for index building and search, which the organizers will run on separate machines. Participants provide a configuration for their index build code that completes within 4 days for each dataset. The protocol for evaluation is as follows:
    +
  • [on indexing machine] participants will be given a local path with 1B vector dataset.
  • +
  • [on indexing machine] participants build an index from the 1B vectors and store back to local disk.
  • +
  • [on indexing machine] Stored index is copied out to a temporary cloud storage location by the eval framework.
  • +
  • [on search machine] organizers load the index from cloud storage to a local path and provide the path to the search code.
  • +
  • [on search machine] organizers perform searches with held-out query set and measure recall and time to process the queries with several sets of parameters.
  • +
+

+ + Finalized details for build and search hardware timing will be released along with the eval framework.

Custom Hardware Track (T3)

+

+ Participants can use non-standard hardware such as GPUs, AI accelerators, FPGAs, and custom in-memory silicon. In this track, participants will either 1) send their hardware, such as PCIe boards, to GSI Technology, or 2) run the evaluation themselves using the scripts made available by the organizers. For T3 participants sending hardware, we will make specific delivery arrangements at the participant's expense. We will install the hardware on a system under the organizers' control (we have a few bare-metal options available) and follow any installation directions provided. Participants will be allowed to temporarily log into the machine to finalize installation and configuration, or to debug the installation as needed. For T3 participants running the evaluation themselves, we request remote SSH access and sudo accounts on the systems so that the organizers can verify the system and hardware (such as IPMI support and minimum resource availability, e.g., disk storage for datasets). The evaluation phase will proceed like T1/T2, with a few modifications.

    +
  • For participants that send their hardware, T3 organizers will provide remote access to a separate indexing machine. +
      +
    • [on separate indexing machine] participants download 1B vector dataset and store to local disk
    • +
    • [on separate indexing machine] participants build an index from the 1B vectors and store back to local disk
    • +
    • Stored index is copied to eval machine
    • +
    • [on eval machine] T3 organizers load the index from local disk
    • +
    • [on eval machine] T3 organizers provide index with held-out query set and measure recall and time to process the queries with several sets of parameters. + Index search code can use internal parallelism to batch process the queries.
    • +
    +
  • +
  • For participants that give us remote access to systems, participants are responsible for building their index. +
      +
    • [on indexing machine] participants download 1B vector dataset and store to local disk
    • +
    • [on indexing machine] participants build an index from the 1B vectors and store back to local disk
    • +
    • Stored index is copied to eval machine
    • +
    • [on eval machine] T3 organizers load the index from local disk
    • +
    • [on eval machine] T3 organizers perform searches with held-out query set and measure recall and search time with several sets of parameters.
    • +
    +
  • +
+ + T3 will maintain different leaderboards for each dataset based on the following benchmarks: +
    +
  • Recall vs throughput using the same ranking formula as the T1/T2 track
  • +
  • Power: recall vs throughput/watt, with a similar ranking formula to the T1/T2 track.
  • +
  • Cost: measured as cost/watt (i.e., queries/second/watt and MSRP/watt).
  • +
  • Total cost normalized across all tracks.
  • +
+ We will provide the exact details on how we collect and compute these benchmarks as well as additional machine and operating system specification before the competition begins. +

+
+ +
+

Benchmark Datasets

+ We intend to use the following six datasets, each with one billion points.
    +
  • BIGANN consists of SIFT descriptors applied to images extracted from a large image dataset.
  • +
  • Facebook SimSearchNet++ is a new dataset released by Facebook for this competition. + It consists of features used for image copy detection for integrity purposes. + The features are generated by Facebook SimSearchNet++ model.
  • +
  • Microsoft Turing-ANNS-1B is a new dataset being released by the Microsoft Turing team for this competition. + It consists of Bing queries encoded by Turing AGI v5 that trains Transformers to capture similarity of intent in + web search queries. An early version of the RNN-based AGI Encoder is described in a + SIGIR'19 paper and a blogpost.
  • +
  • Microsoft SPACEV-1B is a new web search related dataset + released by Microsoft Bing for this competition. + It consists of document and query vectors encoded by Microsoft SpaceV Superior model to capture generic intent representation.
  • +
  • Yandex DEEP-1B image descriptor dataset consisting of the projected + and normalized outputs from the last fully-connected layer of the GoogLeNet model, which was pretrained on the Imagenet classification task.
  • +
  • Yandex Text-to-Image-1B is a new cross-modal dataset (text and visual), where database and query vectors have different distributions in a shared representation space. The base set consists of image embeddings produced by the Se-ResNext-101 model, and queries are textual embeddings produced by a variant of the DSSM model. Since the distributions are different, a 50M sample of the query distribution is provided.
  • +
+ +

+ All datasets are in a common binary format that starts with 8 bytes consisting of num_points (uint32_t) and num_dimensions (uint32_t), followed by num_points x num_dimensions x sizeof(type) bytes of data stored one vector after another. Data files have the suffixes .fbin, .u8bin, and .i8bin for float32, uint8, and int8 data respectively. Note that a different query set will be used for evaluation. The details of the datasets, along with links to the base, query and sample sets and the ground truth nearest neighbors of the query set, are listed below.
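For concreteness, here is a minimal Python sketch of a reader for this binary vector format (the helper name and example path are illustrative and not part of the official eval framework):

import numpy as np

def read_vectors(path):
    """Read a .fbin/.u8bin/.i8bin file: uint32 num_points, uint32 num_dimensions,
    followed by num_points x num_dimensions values of the corresponding element type."""
    suffix_to_dtype = {".fbin": np.float32, ".u8bin": np.uint8, ".i8bin": np.int8}
    dtype = suffix_to_dtype[path[path.rfind("."):]]
    with open(path, "rb") as f:
        num_points, num_dims = np.fromfile(f, dtype=np.uint32, count=2)
        vectors = np.fromfile(f, dtype=dtype, count=int(num_points) * int(num_dims))
    return vectors.reshape(int(num_points), int(num_dims))

# Example usage (hypothetical local path):
# base = read_vectors("data/bigann/base.1B.u8bin")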

+ +

+ The ground truth binary files for k-NN search consist of the following information: num_queries (uint32_t) and K (uint32_t), followed by num_queries x K x sizeof(uint32_t) bytes of data representing the IDs of the K nearest neighbors of the queries, followed by num_queries x K x sizeof(float) bytes of data representing the distances to the corresponding points. The distances help identify neighbors that are tied in distance. In the recall calculation, returning a neighbor that is not in the ground truth set but whose distance is tied with an entry in the ground truth is counted as a success.
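A sketch, under the same assumptions, of reading a k-NN ground truth file and computing a tie-aware recall@k as described above (function names and the epsilon tolerance are illustrative; the official recall computation lives in the eval framework):

import numpy as np

def read_knn_groundtruth(path):
    """uint32 num_queries, uint32 K, then num_queries x K uint32 neighbor IDs,
    then num_queries x K float32 distances."""
    with open(path, "rb") as f:
        num_queries, K = (int(x) for x in np.fromfile(f, dtype=np.uint32, count=2))
        ids = np.fromfile(f, dtype=np.uint32, count=num_queries * K).reshape(num_queries, K)
        dists = np.fromfile(f, dtype=np.float32, count=num_queries * K).reshape(num_queries, K)
    return ids, dists

def recall_at_k(results, gt_ids, gt_dists, k=10, eps=1e-6):
    """Tie-aware recall@k: any ground-truth neighbor whose distance is tied with
    the k-th nearest distance is accepted as a correct answer."""
    hits = 0
    for res, ids, dists in zip(results, gt_ids, gt_dists):
        threshold = dists[k - 1] + eps           # distance of the k-th true neighbor
        accepted = set(ids[dists <= threshold])  # includes entries tied at that distance
        hits += len(accepted.intersection(res[:k]))
    return hits / (k * len(results))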

+

+ The ground truth binary files for range search consist of the following information: num_queries (int32_t), followed by the total number of results total_res (int32_t), followed by num_queries x sizeof(int32_t) bytes giving num_results_per_query for each query, followed by total_res x sizeof(int32_t) bytes giving the IDs of the neighbors of each query, one query after another.
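And a corresponding sketch for the range-search ground truth layout (again just an illustration of the byte layout described above, not the official reader):

import numpy as np

def read_range_groundtruth(path):
    """int32 num_queries, int32 total_res, then num_queries int32 per-query result
    counts, then total_res int32 neighbor IDs laid out one query after another."""
    with open(path, "rb") as f:
        num_queries, total_res = (int(x) for x in np.fromfile(f, dtype=np.int32, count=2))
        counts = np.fromfile(f, dtype=np.int32, count=num_queries)
        ids = np.fromfile(f, dtype=np.int32, count=total_res)
    # Split the flat ID array into one array of neighbor IDs per query.
    return np.split(ids, np.cumsum(counts)[:-1])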

+

+ The ground truth files for the first 10M slice, the first 100M slice, and the complete 1B set of each dataset against the respective query set can be downloaded + here(10M), + here(100M), and + here(1B). +

Dataset Datatype Dimensions Distance Range/k-NN Base data Sample data Query data Ground truth Release terms
BIGANN uint8 128 L2 k-NN 1B points 100M base points 10K queries link CC0
Facebook SimSearchNet++* uint8 256 L2 Range (squared distance: 96237) 1B points N/A 100k queries link CC BY-NC
Microsoft Turing-ANNS* float32 100 L2 k-NN 1B points N/A 100K queries link link to terms
Microsoft SPACEV* int8 100 L2 k-NN 1B points 100M base points 29.3K queries link O-UDA
Yandex DEEP float32 96 L2 k-NN 1B points 350M base points 10K queries link CC BY 4.0
Yandex Text-to-Image* float32 200 inner-product k-NN 1B points 50M queries 100K queries link CC BY 4.0
+ * new datasets
+ We recommend using Axel for downloading BIGANN, Facebook-SSN++, Yandex DEEP1B and T2I datasets.
+ We recommend using AzCopy for downloading Microsoft datasets. +
+ +
+

Metrics

+ The competition will measure recall@10 of the algorithms on the 6 datasets using a private (unreleased) query set at a fixed query throughput. Track T1 measures recall at 10000 queries/second (on 32 vCPUs), T2 measures recall at 1500 queries/second, and T3 measures recall at 2000 queries/second. The primary metric for comparison in each track will be the sum of improvements in recall over the baseline at the target QPS across all datasets. Additionally, track T3 will also rank entries by power and cost per query. See this notebook for power and cost analysis. A team has to publish an algorithm and commit to benchmarking on at least 3 datasets to be considered for ranking. Recall regression on a dataset selected by a team will be counted as a negative score. The recall@10 (AP for the SSN++-1B dataset) of the baseline algorithms on each dataset for the public query set is listed below.
Track Algorithm Search MachineTarget Queries/secBIGANN-1B SSN++-1B Turing-ANNS-1B SPACEV-1B DEEP-1B Text-to-Image-1B
Track 1 FAISS-CPU Azure F32s_v2 32vCPUs + 64GB RAM10000 0.634 0.753 0.703 0.728 0.650 0.069
Track 2 DiskANN Azure L8s_v2 8vCPUs + 64GB RAM + 1TB SSD 1500 0.949 0.16274 0.936 0.901 0.937 0.488
Track 3 FAISS-GPU NVIDIA V100 + 700GB RAM2000 0.927 TBA 0.910 0.850 0.942 0.86
+
+ +
+ Baseline DiskANN indices for T2 can be downloaded using "azcopy copy 'https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices' 'local_folder' --recursive". + Note that this would take some time as the indices are large. All indices were built using R and L parameters set to 100. + Search for T2 used 16 threads and beamwidth 4. The Ls parameter was varied to tune recall vs QPS.
+ Update: T2 baseline results have been modified after measuring via the pybind11 interface on Docker. There was a 30-40% QPS loss using this interface compared to direct measurements of the C++ code from the command line. As a result, the QPS target has now been lowered, and the recall is reported at this threshold.
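To make the ranking rule concrete, here is a small Python sketch of the primary T1/T2 metric: the sum of per-dataset recall improvements over the baseline at the target QPS, with regressions counted negatively. The baseline numbers are the published T1 (FAISS-CPU) recalls from the table above; the submission recalls and dataset keys are hypothetical.

# Sketch of the primary ranking metric for T1/T2 (illustrative only).
T1_BASELINE_RECALL = {
    "bigann-1B": 0.634, "ssnpp-1B": 0.753, "msturing-1B": 0.703,
    "msspacev-1B": 0.728, "deep-1B": 0.650, "text2image-1B": 0.069,
}

def ranking_score(submission_recall, baseline_recall):
    """Sum of (recall - baseline) over the datasets a team benchmarked;
    a recall below the baseline contributes a negative term."""
    return sum(r - baseline_recall[ds] for ds, r in submission_recall.items())

# A team must benchmark at least 3 datasets to be ranked (hypothetical values):
submission = {"bigann-1B": 0.712, "deep-1B": 0.705, "msspacev-1B": 0.690}
print(round(ranking_score(submission, T1_BASELINE_RECALL), 3))  # 0.078 + 0.055 - 0.038 = 0.095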
+

Call for Participation and Timeline

+

+ Participation is open to all teams interested in developing new algorithms or re-implementing + existing algorithms more efficiently either in software or hardware. Participants are + requested to submit a brief document through CMT + for each track they will be competing in. The document should contain the following details: +

    +
  • Name, email and affiliation of each participant in the team
  • +
  • A name and/or URL for the submission.
  • +
  • [Optional] To receive Azure credits for developing new ideas, please submit your request + by June 30th with preliminary data on smaller scale datasets and why you think + your algorithm will work well at billion scale. This will be used by the organizers to select strong + entries. We request teams who already have access to infrastructure (e.g. those from industry or + with access to large university clusters) to skip this.
  • +
+ + + For Track T3, the document should contain the following additional details to help organizers plan and assess eligibility for separate leaderboards:
    +
  • Type of hardware, e.g., PCIe extension board, rack-mounted system, or other.
  • +
  • Evidence of the retail MSRP of the hardware, i.e., pricing on website or copy of the customer invoice.
  • +
  • Whether hardware will be sent to GSI Technology (at the participant's expense) or whether organizers will be given remote access to the systems. For participants providing remote system access, whether their system supports standard IPMI power monitoring; if not, an equivalent power monitoring interface must be available.
  • Operating system requirements.
  • +
  • Whether the participant requires a separate machine for index building. We have limited Azure-based + Fsv2-series machines and some bare-metal machines managed by the T3 organizers.
  • +
+

+ +

Consent Forms

+ Please review and complete the consent form for participation in Tracks T1/T2 + and Track T3. Note that there are separate consent forms + for the standard and custom hardware tracks. Completing the form is necessary for participation. + +

+

Timeline (subject to change)

+
    +
  • May: release of data, guidelines, and a call for participation. Registration open.
  • +
  • June: Baseline results, testing infrastructure and final ranking metrics released.
  • +
  • July 11th: Participants in need of compute resources to submit an expression of interest.
  • +
  • Mid-July: Allocation of compute resources.
  • +
  • July 30th: Final deadline for participants to submit an expression of interest through CMT.
  • +
  • October 22nd: End of competition period. Teams to release code in a containerized form and complete a pull request to the eval framework with code to run their algorithms.
  • +
  • October 29th: Participants submit a brief report outlining their algorithm and results.
  • +
  • Mid-November: Release of preliminary results on standardized machines. Review of code by organizers and participants. Participants can raise concerns about the evaluation.
  • +
  • Early December: Final results published, and competition results archived (the competition will go on if interest continues).
  • +
  • During NeurIPS, organizers will provide an overview of the competition and results. Organizers will also request the best entries + (including leaderboard toppers, or promising new approaches) to present an overview for further discussion.
  • +
+

+
+ + +
+ \ No newline at end of file diff --git a/eval/show_operating_points.py b/eval/show_operating_points.py deleted file mode 100644 index b1c224d57..000000000 --- a/eval/show_operating_points.py +++ /dev/null @@ -1,26 +0,0 @@ -import argparse -import pandas as pd - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--algorithm', - required=True) - parser.add_argument( - '--threshold', - default=10000, - help='minimum QPS (10,000 T1/2,000 T2)', - type=int) - parser.add_argument( - 'csv', - metavar='CSV', - help='input csv') - - args = parser.parse_args() - df = pd.read_csv(args.csv) - - print(df[(df.qps > args.threshold) & (df.algorithm == args.algorithm)].groupby(['algorithm', 'dataset']).max()[['recall/ap']]) - - - - diff --git a/fonts/MediumLLWeb-Regular.woff2 b/fonts/MediumLLWeb-Regular.woff2 new file mode 100644 index 000000000..8531f544f Binary files /dev/null and b/fonts/MediumLLWeb-Regular.woff2 differ diff --git a/fonts/MediumLLWeb-SemiBold.woff2 b/fonts/MediumLLWeb-SemiBold.woff2 new file mode 100644 index 000000000..fe88205ce Binary files /dev/null and b/fonts/MediumLLWeb-SemiBold.woff2 differ diff --git a/index.html b/index.html index aeea3c4de..56b5f9261 100644 --- a/index.html +++ b/index.html @@ -1,551 +1,9 @@ - - - - - - - - Big ANN Benchmarks - - - - - - - - - - - - - - - -
-

Billion-Scale Approximate Nearest Neighbor Search Challenge: NeurIPS'21 competition track

- -

- - - - -

Code, Report, Results and Blogs

- - -
-

NeurIPS'21 session schedule

- The NeurIPS session for this competition is scheduled for Dec 8. - NeurIPS registration is required for access to the session.
- Overview Talk and Break-out session schedule (GMT). -
    -
  • 11:05-11:25: Overview Talk (slides)
  • -
  • 12:00-12:45: Overview of results presented by organizers, followed by Q&A
  • -
      -
    • Standard hardware tracks T1 and T2 results (slides)
    • -
    • Custom hardware track T3 results (slides)
    • -
    -
  • 12:45-13:20: Invited talk 1 by Prof. Alexandr Andoni: Learning to Hash Robustly, with Guarantees (slides)
  • -
  • 13:20-13:55: Invited talk 2 by Prof. Anshumali Shrivastava:Iterative Repartitioning for Learning to Hash and the Power of k-Choices (slides)
  • -
  • 13:55-14:30: Talks from track winners. -
      -
    • Track 1: kst_ann_t1 Li Liu, Jin Yu, Guohao Dai, Wei Wu, Yu Qiao, Yu Wang, Lingzhi Liu, Kuaishou Technology and Tsinghua University
    • -
    • Track 2: BBANN Xiaomeng Yi, Xiaofan Luan, Weizhi Xu, Qianya Cheng, Jigao Luo, Xiangyu Wang, Jiquan Long, Xiao Yan, Zheng Bian, Jiarui Luo, Shengjun Li, Chengming Li, Zilliz and Southern University of Science and Technology (slides)
    • -
    • Track 3: OptaNNe Sourabh Dongaonkar, Mark Hildebrand, Mariano Tepper, Cecilia Aguerrebere, Ted Willke, Jawad Khan, Intel Corporation, Intel Labs and UC Davis (slides)
    • -
    -
  • -
  • 14:30-15:00: Open discussion on competition and future directions (github thread)
  • -
-

- - - -

- Abstract for Invited talk: "Learning to Hash Robustly, with Guarantees"
- There is a gap between the high-dimensional nearest neighbor search - (NNS) algorithms achieving the best worst-case guarantees and the - top-performing ones in practice. The former are based on indexing via - the randomized Locality Sensitive Hashing (LSH), and its - derivatives. The latter "learn" the best indexing method in order to - speed-up NNS, crucially adapting to the structure of the given - dataset. Alas, the latter also almost always come at the cost of - losing the guarantees of either correctness or robust performance on - adversarial queries (or apply to datasets with an assumed extra - structure/model). - - How can we bridge these two perspectives and bring the best of both - worlds? As a step in this direction, we will talk about an NNS algorithm - that has worst-case guarantees essentially matching that of - theoretical algorithms, while optimizing the hashing to the structure - of the dataset (think instance-optimal algorithms) for performance on - the minimum-performing query. We will discuss the algorithm's ability - to optimize for a given dataset from both theoretical and practical - perspective. -

- -

- Abstract for Invited talk: "Iterative Repartitioning for Learning to Hash and the Power of k-Choices"
- Dense embedding models are commonly deployed in commercial - search engines, wherein all the vectors are pre-computed, and - near-neighbor search (NNS) is performed with the query vector to find - relevant documents. However, the bottleneck of indexing a large number - of dense vectors and performing an NNS hurts the query time and - accuracy of these models. In this talk, we argue that high-dimensional - and ultra-sparse embedding is a significantly superior alternative to - dense low-dimensional embedding for both query efficiency and - accuracy. Extreme sparsity eliminates the need for NNS by replacing - them with simple lookups, while its high dimensionality ensures that - the embeddings are informative even when sparse. However, learning - extremely high dimensional embeddings leads to blow-up in the model - size. To make the training feasible, we propose a partitioning - algorithm that learns such high-dimensional embeddings across multiple - GPUs without any communication. We theoretically prove that our way of - one-sided learning is equivalent to learning both query and label - embeddings. We call our novel system designed on sparse embeddings as - IRLI (pronounced `early'), which iteratively partitions the items by - learning the relevant buckets directly from the query-item relevance - data. Furthermore, IRLI employs a superior power-of-k-choices based - load balancing strategy. We mathematically show that IRLI retrieves - the correct item with high probability under very natural assumptions - and provides superior load balancing. IRLI surpasses the best - baseline's precision on multi-label classification while being 5x - faster on inference. For near-neighbor search tasks, the same method - outperforms the state-of-the-art Learned Hashing approach NeuralLSH by - requiring only ~ {1/6}^th of the candidates for the same recall. IRLI - is both data and model parallel, making it ideal for distributed GPU - implementation. We demonstrate this advantage by indexing 100 million - dense vectors and surpassing the popular FAISS library by >10%. -

-
- -
-

Why this competition?

- In the past few years, we’ve seen a lot of new research and creative approaches for large-scale ANNS, including: -
    -
  • Partition-based, and graph-based indexing strategies (as well as hybrid indexing approaches).
  • -
  • Mixing RAM and SSD storage to efficiently store and process large datasets that exceed the size of RAM.
  • -
  • Using accelerator hardware such as GPUs, FPGAs, and other custom in-memory silicon.
  • -
  • Leveraging machine learning for dimensionality reduction of the original vectors.
  • -
-

- In addition to an uptick in academic interest, many implementations of these algorithms at scale now appear in production and high-availability datacenter contexts, powering enterprise-grade, mission-critical, and web-scale search applications. In these deployment scenarios, benchmarks such as cost, preprocessing time, and power consumption become just as important as the recall-vs-latency tradeoff. Despite this, most empirical evaluations of algorithms have focused on smaller datasets of about a million points, e.g. ann-benchmarks.com. However, deploying recent algorithmic advances in ANNS techniques for search, recommendation, and ranking at scale requires support at billion-point or substantially larger scale. Barring a few recent papers, there is limited consensus on which algorithms are effective at this scale.

- - We believe that this challenge will be impactful in several ways: -
    -
  • Provide a comparative understanding of algorithmic ideas and their application at scale.
  • -
  • Promote the development of new techniques for the problem and demonstration of their value.
  • -
  • Provide a compilation of datasets, many new, to enable future development of algorithms.
  • -
  • Introduce a standard benchmarking approach.
  • -
- By providing a platform for those interested in this problem, we aim to encourage more collaboration and collectively advance the field at a more rapid pace. Researchers can request Azure compute credit from a pool sponsored by Microsoft Research.
- -
-

Tracks

-

Standard Hardware Tracks (T1 and T2)

-

- There are two standard hardware tracks: -

    -
  • Track 1: In-memory indices with FAISS as the baseline. - Search would use Azure Standard_F32s_v2 VMs - with 32 vCPUs and 64GB RAM. Index construction would use Azure - Standard_F64s_v2 VM - with 64vCPUs, 128GB RAM and an additional 4TB of SSD to be used for storing the data, index and other intermediate data.
  • -
  • Track 2: Out-of-core indices with DiskANN as the baseline. - In addition to the limited DRAM in T1, index can use an SSD for search. - Search would use Azure - Standard_L8s_v2 VMs with 8 vCPUS, 64GB RAM and a local SSD Index constrained to 1TB. - Construction would use Azure - Standard_F64s_v2 VM - with 64vCPU, 128GB RAM and an additional 4TB of SSD to be used for storing the data, index and other intermediate data.
  • -
- Participants are expected to release their code for index building and search which the organizers will run on separate machines. - Participants provide a configuration for their index build code that would complete in 4 days for each dataset. - The protocol for evaluation is as follows: -
    -
  • [on indexing machine] participants will be given a local path with 1B vector dataset.
  • -
  • [on indexing machine] participants build an index from the 1B vectors and store back to local disk.
  • -
  • [on indexing machine] Stored index is copied out to a temporary cloud storage location by the eval framework.
  • -
  • [on search machine] organizers load the index from cloud storage to a local path and provide the path to the search code.
  • -
  • [on search machine] organizers perform searches with held-out query set and measure recall and time to process the queries with several sets of parameters.
  • -
-

- - Finalized details for build and search hardware timing will be released along with the eval framework.

Custom Hardware Track (T3)

-

- Participants can use non-standard hardware such as GPUs, AI accelerators, FPGAs, and custom in-memory silicon. - In this track, participants will either 1) send their hardware, such as PCI boards to GSI Technology or 2) evaluate - themselves using the scripts made available by the organizers. For T3 participants sending hardware, - we will make specific delivery arrangements at participant’s expense. We will install the hardware on a system under - the organizers control (we have a few bare-metal options available) and follow any installation directions provided. - Participants will be allowed to temporarily log into the machine to finalize any installation and configuration, - or for debugging installation as needed. For T3 participants running the evaluation themselves, we request remote ssh - access and sudo accounts on the systems so that the organizers can verify the system and hardware (such as IPMI support, - minimum resource availability such as disk storage for datasets). - - The evaluation phase will proceed like T1/T2, with a few modifications. -

    -
  • For participants that send their hardware, T3 organizers will provide remote access to a separate indexing machine. -
      -
    • [on separate indexing machine] participants download 1B vector dataset and store to local disk
    • -
    • [on separate indexing machine] participants build an index from the 1B vectors and store back to local disk
    • -
    • Stored index is copied to eval machine
    • -
    • [on eval machine] T3 organizers load the index from local disk
    • -
    • [on eval machine] T3 organizers provide index with held-out query set and measure recall and time to process the queries with several sets of parameters. - Index search code can use internal parallelism to batch process the queries.
    • -
    -
  • -
  • For participants that give us remote access to systems, participants are responsible for building their index. -
      -
    • [on indexing machine] participants download 1B vector dataset and store to local disk
    • -
    • [on indexing machine] participants build an index from the 1B vectors and store back to local disk
    • -
    • Stored index is copied to eval machine
    • -
    • [on eval machine] T3 organizers load the index from local disk
    • -
    • [on eval machine] T3 organizers perform searches with held-out query set and measure recall and search time with several sets of parameters.
    • -
    -
  • -
- - T3 will maintain different leaderboards for each dataset based on the following benchmarks: -
    -
  • Recall vs throughput using the same ranking formula as the T1/T2 track
  • -
  • Power- recall vs throughput/watt and a similar ranking formula to the T1/T2 track.
  • -
  • Cost measured as cost/watt (measured as queries/second/watt and MSRP/watt)
  • -
  • Total cost normalized across all tracks.
  • -
- We will provide the exact details on how we collect and compute these benchmarks as well as additional machine and operating system specification before the competition begins. -

-
- -
-

Benchmark Datasets

- We intend to use the following 6 billion point datasets. -
    -
  • BIGANN consists of SIFT descriptors applied to images extracted from a large image dataset.
  • -
  • Facebook SimSearchNet++ is a new dataset released by Facebook for this competition. - It consists of features used for image copy detection for integrity purposes. - The features are generated by Facebook SimSearchNet++ model.
  • -
  • Microsoft Turing-ANNS-1B is a new dataset being released by the Microsoft Turing team for this competition. - It consists of Bing queries encoded by Turing AGI v5 that trains Transformers to capture similarity of intent in - web search queries. An early version of the RNN-based AGI Encoder is described in a - SIGIR'19 paper and a blogpost.
  • -
  • Microsoft SPACEV-1B is a new web search related dataset - released by Microsoft Bing for this competition. - It consists of document and query vectors encoded by Microsoft SpaceV Superior model to capture generic intent representation.
  • -
  • Yandex DEEP-1B image descriptor dataset consisting of the projected - and normalized outputs from the last fully-connected layer of the GoogLeNet model, which was pretrained on the Imagenet classification task.
  • -
  • Yandex Text-to-Image-1B is a new cross-modal dataset (text and visual), where database and query vectors have different distributions in a shared representation space. The base set consists of image embeddings produced by the Se-ResNext-101 model, and queries are textual embeddings produced by a variant of the DSSM model. Since the distributions are different, a 50M sample of the query distribution is provided.
  • -
- -

- All datasets are in the common binary format that starts with 8 bytes of data consisting of num_points(uint32_t) - num_dimensions(uint32) followed by num_pts X num_dimensions x sizeof(type) bytes of data stored one vector after another. Data files - will have suffixes .fbin, .u8bin, and .i8bin to represent float32, uint8 and int8 type data. Note that a different query set - will be used for evaluation. The details of the datasets along with links to the base, query and sample sets, and the ground truth nearest neighbors - of the query set are listed below. -

- -

- The ground truth binary files for k-NN search consist of the following information: num_queries(uint32_t) - K-NN(uint32) followed by num_queries X K x sizeof(uint32_t) bytes of data representing the IDs of the K-nearest neighbors of the - queries, followed by num_queries X K x sizeof(float) bytes of data representing the distances to the corresponding points. The distances - help identify neighbors tied in terms of distances. In recall calculation, returning a neighbor not in the ground truth set but whose distance is tied - with an entry in the ground truth is counted as success. -

-

- The ground truth binary files for range search consist of the following information: num_queries(int32_t) followed by the total number - of results total_res(int32_t) followed - by num_queries X size(int32_t) bytes corresponding to num_results_per_query for each query, followed by total_res X sizeof(int32_t) - bytes corresponding to the IDs of the neighbors of each query one after the other. -

-

- The ground truth files for the first 10M slice, the first 100M slice, and the complete 1B set of each dataset against the respective query set can be downloaded - here(10M), - here(100M), and - here(1B). -

Dataset Datatype Dimensions Distance Range/k-NN Base data Sample data Query data Ground truth Release terms
BIGANN uint8 128 L2 k-NN 1B points 100M base points 10K queries link CC0
Facebook SimSearchNet++* uint8 256 L2 Range 1B points N/A 100k queries link CC BY-NC
Microsoft Turing-ANNS* float32 100 L2 k-NN 1B points N/A 100K queries link link to terms
Microsoft SPACEV* int8 100 L2 k-NN 1B points 100M base points 29.3K queries link O-UDA
Yandex DEEP float32 96 L2 k-NN 1B points 350M base points 10K queries link CC BY 4.0
Yandex Text-to-Image* float32 200 inner-product k-NN 1B points 50M queries 100K queries link CC BY 4.0
- * new datasets
- We recommend using Axel for downloading BIGANN, Facebook-SSN++, Yandex DEEP1B and T2I datasets.
- We recommend using AzCopy for downloading Microsoft datasets. -
- -
-

Metrics

- The competition will measure recall@10 of the algorithms on the 6 datasets using a private (unreleased) query set at a fixed query throughput. Track T1 measures recall at 10000 queries/second (on 32 vCPUs), T2 measures recall at 1500 queries/second, and T3 measures recall at 2000 queries/second. The primary metric for comparison in each track will be the sum of improvements in recall over the baseline at the target QPS across all datasets. Additionally, track T3 will also rank entries by power and cost per query. See this notebook for power and cost analysis. A team has to publish an algorithm and commit to benchmarking on at least 3 datasets to be considered for ranking. Recall regression on a dataset selected by a team will be counted as a negative score. The recall@10 (AP for the SSN++-1B dataset) of the baseline algorithms on each dataset for the public query set is listed below.
Track Algorithm Search MachineTarget Queries/secBIGANN-1B SSN++-1B Turing-ANNS-1B SPACEV-1B DEEP-1B Text-to-Image-1B
Track 1 FAISS-CPU Azure F32s_v2 32vCPUs + 64GB RAM10000 0.634 0.753 0.703 0.728 0.650 0.069
Track 2 DiskANN Azure L8s_v2 8vCPUs + 64GB RAM + 1TB SSD 1500 0.949 0.16274 0.936 0.901 0.937 0.488
Track 3 FAISS-GPU NVIDIA V100 + 700GB RAM2000 0.927 TBA 0.910 0.850 0.942 0.86
-
- -
- Baseline DiskANN indices for T2 can be downloaded using "azcopy copy 'https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices' 'local_folder' --recursive". - Note that this would take some time as the indices are large. All indices were built using R and L parameters set to 100. - Search for T2 used 16 threads and beamwidth 4. The Ls parameter was varied to tune recall vs QPS.
- Update: T2 baseline results have been modified after measuring via pybind11 interface on docker. There was a 30-40% QPS loss using this interface - as compared to direct measurements of C++ code from commandline. As a result, the QPS target has now been lowered, and the recall is reported at this threshold. - - -
-

Call for Participation and Timeline

-

- Participation is open to all teams interested in developing new algorithms or re-implementing - existing algorithms more efficiently either in software or hardware. Participants are - requested to submit a brief document through CMT - for each track they will be competing in. The document should contain the following details: -

    -
  • Name, email and affiliation of each participant in the team
  • -
  • A name and/or URL for the submission.
  • -
  • [Optional] To receive Azure credits for developing new ideas, please submit your request - by June 30th with preliminary data on smaller scale datasets and why you think - your algorithm will work well at billion scale. This will be used by the organizers to select strong - entries. We request teams who already have access to infrastructure (e.g. those from industry or - with access to large university clusters) to skip this.
  • -
- - - For Track T3, the document should contain the following additional details to help organizers plan and assess eligibility for separate leaderboards:
    -
  • Type of hardware, e.g., PCIe extension board, rack-mounted system, or other.
  • -
  • Evidence of the retail MSRP of the hardware, i.e., pricing on website or copy of the customer invoice.
  • -
  • Whether hardware will be sent to GSI Technology (at the participant's expense) or whether organizers will be given remote access to the systems. For participants providing remote system access, whether their system supports standard IPMI power monitoring; if not, an equivalent power monitoring interface must be available.
  • Operating system requirements.
  • -
  • Whether the participant requires a separate machine for index building. We have limited Azure-based - Fsv2-series machines and some bare-metal machines managed by the T3 organizers.
  • -
-

- -

Consent Forms

- Please review and complete the consent form for participation in Tracks T1/T2 - and Track T3. Note that there are separate consent forms - for the standard and custom hardware tracks. Completing the form is necessary for participation. - -

-

Timeline (subject to change)

-
    -
  • May: release of data, guidelines, and a call for participation. Registration open.
  • -
  • June: Baseline results, testing infrastructure and final ranking metrics released.
  • -
  • July 11th: Participants in need of compute resources to submit an expression of interest.
  • -
  • Mid-July: Allocation of compute resources.
  • -
  • July 30th: Final deadline for participants to submit an expression of interest through CMT.
  • -
  • October 22nd: End of competition period. Teams to release code in a containerized form and complete a pull request to the eval framework with code to run their algorithms.
  • -
  • October 29th: Participants submit a brief report outlining their algorithm and results.
  • -
  • Mid-November: Release of preliminary results on standardized machines. Review of code by organizers and participants. Participants can raise concerns about the evaluation.
  • -
  • Early December: Final results published, and competition results archived (the competition will go on if interest continues).
  • -
  • During NeurIPS, organizers will provide an overview of the competition and results. Organizers will also request the best entries - (including leaderboard toppers, or promising new approaches) to present an overview for further discussion.
  • -
-

-
- - -
- - + + + + + + +

You will be redirected to the NeurIPS'23 page soon!

+ + \ No newline at end of file diff --git a/install.py b/install.py deleted file mode 100644 index fe212da01..000000000 --- a/install.py +++ /dev/null @@ -1,78 +0,0 @@ -import json -import os -import argparse -import subprocess -from multiprocessing import Pool - - -def build(library, args, dockerfile): - print('Building %s...' % library) - if args is not None and len(args) != 0: - q = " ".join(["--build-arg " + x.replace(" ", "\\ ") for x in args]) - else: - q = "" - - try: - command = 'docker build %s --rm -t billion-scale-benchmark-%s -f' \ - % (q, library ) - command += ' install/Dockerfile.%s .' % (library) \ - if not dockerfile else ' %s .' % dockerfile - subprocess.check_call(command, shell=True) - return {library: 'success'} - except subprocess.CalledProcessError: - return {library: 'fail'} - - -def build_multiprocess(args): - return build(*args) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( - "--proc", - default=1, - type=int, - help="the number of process to build docker images") - parser.add_argument( - '--algorithm', - metavar='NAME', - help='build only the named algorithm image', - default=None) - parser.add_argument( - '--dockerfile', - metavar='PATH', - help='build only the image from a Dockerfile path', - default=None) - parser.add_argument( - '--build-arg', - help='pass given args to all docker builds', - nargs="+") - args = parser.parse_args() - - print('Building base image...') - subprocess.check_call( - 'docker build \ - --rm -t billion-scale-benchmark -f install/Dockerfile .', shell=True) - - if args.dockerfile: - tags = [os.path.basename(os.path.dirname(args.dockerfile))] - elif args.algorithm: - tags = [args.algorithm] - elif os.getenv('LIBRARY'): - tags = [os.getenv('LIBRARY')] - else: - tags = [fn.split('.')[-1] for fn in os.listdir('install') if fn.startswith('Dockerfile.') and not 'faissgpu' in fn] - - print('Building algorithm images... with (%d) processes' % args.proc) - - if args.proc == 1: - install_status = [build(tag, args.build_arg, args.dockerfile) for tag in tags ] - else: - pool = Pool(processes=args.proc) - install_status = pool.map(build_multiprocess, [(tag, args.build_arg, args.dockerfile) for tag in tags ]) - pool.close() - pool.join() - - print('\n\nInstall Status:\n' + '\n'.join(str(algo) for algo in install_status)) diff --git a/install/Dockerfile b/install/Dockerfile deleted file mode 100644 index 8b41dd303..000000000 --- a/install/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM ubuntu:18.04 - -RUN apt-get update && apt-get install -y python3-numpy python3-scipy python3-pip build-essential git axel wget -RUN wget https://aka.ms/downloadazcopy-v10-linux && mv downloadazcopy-v10-linux azcopy.tgz && tar xzf azcopy.tgz --transform 's!^[^/]\+\($\|/\)!azcopy_folder\1!' 
-RUN cp azcopy_folder/azcopy /usr/bin - -RUN pip3 install -U pip - -WORKDIR /home/app -COPY requirements.txt run_algorithm.py ./ -RUN pip3 install -r requirements.txt - -ENTRYPOINT ["python3", "-u", "run_algorithm.py"] diff --git a/install/Dockerfile.diskann b/install/Dockerfile.diskann deleted file mode 100644 index 54599646a..000000000 --- a/install/Dockerfile.diskann +++ /dev/null @@ -1,29 +0,0 @@ -FROM billion-scale-benchmark - -RUN apt-get update -RUN apt-get install -y wget git cmake g++ libaio-dev libgoogle-perftools-dev clang-format libboost-dev python3 python3-setuptools python3-pip -RUN pip3 install pybind11 numpy - -RUN cd /tmp && wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB -RUN cd /tmp && apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB -RUN cd /tmp && rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB -RUN cd /tmp && sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list' -RUN apt-get update -RUN apt-get install -y intel-mkl-64bit-2020.0-088 - -RUN update-alternatives --install /usr/lib/x86_64-linux-gnu/libblas.so libblas.so-x86_64-linux-gnu /opt/intel/mkl/lib/intel64/libmkl_rt.so 150 -RUN update-alternatives --install /usr/lib/x86_64-linux-gnu/libblas.so.3 libblas.so.3-x86_64-linux-gnu /opt/intel/mkl/lib/intel64/libmkl_rt.so 150 -RUN update-alternatives --install /usr/lib/x86_64-linux-gnu/liblapack.so liblapack.so-x86_64-linux-gnu /opt/intel/mkl/lib/intel64/libmkl_rt.so 150 -RUN update-alternatives --install /usr/lib/x86_64-linux-gnu/liblapack.so.3 liblapack.so.3-x86_64-linux-gnu /opt/intel/mkl/lib/intel64/libmkl_rt.so 150 - -RUN echo "/opt/intel/lib/intel64" > /etc/ld.so.conf.d/mkl.conf -RUN echo "/opt/intel/mkl/lib/intel64" >> /etc/ld.so.conf.d/mkl.conf -RUN ldconfig -RUN echo "MKL_THREADING_LAYER=GNU" >> /etc/environment - -RUN git clone --single-branch --branch python_bindings_diskann https://github.com/microsoft/diskann -RUN mkdir -p diskann/build -RUN cd diskann/build && cmake -DCMAKE_BUILD_TYPE=Release .. -RUN cd diskann/build && make -j -RUN cd diskann/python && pip install -e . -RUN python3 -c 'import diskannpy' diff --git a/install/Dockerfile.elastiknn b/install/Dockerfile.elastiknn deleted file mode 100644 index e69de29bb..000000000 diff --git a/install/Dockerfile.faiss b/install/Dockerfile.faiss deleted file mode 100644 index 191966ab6..000000000 --- a/install/Dockerfile.faiss +++ /dev/null @@ -1,9 +0,0 @@ -FROM billion-scale-benchmark - -RUN apt-get update && apt-get install -y libopenblas-base libopenblas-dev libpython3-dev swig python3-dev libssl-dev wget -RUN wget https://github.com/Kitware/CMake/releases/download/v3.18.3/cmake-3.18.3-Linux-x86_64.sh && mkdir cmake && sh cmake-3.18.3-Linux-x86_64.sh --skip-license --prefix=cmake && rm cmake-3.18.3-Linux-x86_64.sh -RUN git clone https://github.com/facebookresearch/faiss lib-faiss -RUN cd lib-faiss && ../cmake/bin/cmake -DFAISS_OPT_LEVEL=avx2 -DCMAKE_BUILD_TYPE=Release -DFAISS_ENABLE_GPU=OFF -DPython_EXECUTABLE=/usr/bin/python3 -B build . 
-RUN cd lib-faiss && make -C build -j4 -RUN cd lib-faiss && cd build && cd faiss && cd python && python3 setup.py install && cd && rm -rf cmake -RUN python3 -c 'import faiss; print(faiss.IndexFlatL2)' diff --git a/install/Dockerfile.faissconda b/install/Dockerfile.faissconda deleted file mode 100644 index 914fd0649..000000000 --- a/install/Dockerfile.faissconda +++ /dev/null @@ -1,17 +0,0 @@ -FROM billion-scale-benchmark - -RUN apt update && apt install -y wget -RUN wget https://repo.anaconda.com/archive/Anaconda3-2020.11-Linux-x86_64.sh -RUN bash Anaconda3-2020.11-Linux-x86_64.sh -b - -ENV PATH /root/anaconda3/bin:$PATH - -RUN conda install -c pytorch faiss-cpu -COPY install/requirements_conda.txt ./ -# conda doesn't like some of our packages, use pip -RUN python3 -m pip install -r requirements_conda.txt - -RUN python3 -c 'import faiss; print(faiss.IndexFlatL2)' - - - diff --git a/install/Dockerfile.httpann_example b/install/Dockerfile.httpann_example deleted file mode 100644 index 4b36efd6b..000000000 --- a/install/Dockerfile.httpann_example +++ /dev/null @@ -1,3 +0,0 @@ -FROM billion-scale-benchmark - -RUN python3 -m pip install flask==2.0.1 diff --git a/install/requirements_conda.txt b/install/requirements_conda.txt deleted file mode 100644 index c902620f5..000000000 --- a/install/requirements_conda.txt +++ /dev/null @@ -1,11 +0,0 @@ -ansicolors -docker-py -h5py -matplotlib -numpy -pyyaml -psutil -scipy -scikit-learn -jinja2 -pandas diff --git a/logging.conf b/logging.conf deleted file mode 100644 index 6a4595f12..000000000 --- a/logging.conf +++ /dev/null @@ -1,34 +0,0 @@ -[loggers] -keys=root,annb - -[handlers] -keys=consoleHandler,fileHandler - -[formatters] -keys=simpleFormatter - -[formatter_simpleFormatter] -format=%(asctime)s - %(name)s - %(levelname)s - %(message)s -datefmt= - -[handler_consoleHandler] -class=StreamHandler -level=INFO -formatter=simpleFormatter -args=(sys.stdout,) - -[handler_fileHandler] -class=FileHandler -level=INFO -formatter=simpleFormatter -args=('annb.log','w') - -[logger_root] -level=WARN -handlers=consoleHandler - -[logger_annb] -level=INFO -handlers=consoleHandler,fileHandler -qualname=annb -propagate=0 diff --git a/neurips21.html b/neurips21.html new file mode 100644 index 000000000..d2c6b4d9a --- /dev/null +++ b/neurips21.html @@ -0,0 +1,552 @@ + + + + + + + + Big ANN Benchmarks + + + + + + + + + + + + + + + +
+

Billion-Scale Approximate Nearest Neighbor Search Challenge: NeurIPS'21 competition track

+ +

+ + + + +

Code, Report, Results and Blogs

+ + + +
+

Why this competition?

+ In the past few years, we’ve seen a lot of new research and creative approaches for large-scale ANNS, including: +
    +
  • Partition-based, and graph-based indexing strategies (as well as hybrid indexing approaches).
  • +
  • Mixing RAM and SSD storage to efficiently store and process large datasets that exceed the size of RAM.
  • +
  • Using accelerator hardware such as GPUs, FPGAs, and other custom in-memory silicon.
  • +
  • Leveraging machine learning for dimensionality reduction of the original vectors.
  • +
+

+ In addition to an uptick in academic interest, many implementations of these algorithms at scale now appear in production and high-availability datacenter contexts, powering enterprise-grade, mission-critical, and web-scale search applications. In these deployment scenarios, benchmarks such as cost, preprocessing time, and power consumption become just as important as the recall-vs-latency tradeoff. Despite this, most empirical evaluations of algorithms have focused on smaller datasets of about a million points, e.g. ann-benchmarks.com. However, deploying recent algorithmic advances in ANNS techniques for search, recommendation, and ranking at scale requires support at billion-point or substantially larger scale. Barring a few recent papers, there is limited consensus on which algorithms are effective at this scale.

+ + We believe that this challenge will be impactful in several ways: +
    +
  • Provide a comparative understanding of algorithmic ideas and their application at scale.
  • +
  • Promote the development of new techniques for the problem and demonstration of their value.
  • +
  • Provide a compilation of datasets, many new, to enable future development of algorithms.
  • +
  • Introduce a standard benchmarking approach.
  • +
+ By providing a platform for those interested in this problem, we aim to encourage more collaboration and collectively advance the field at a more rapid pace. Researchers can request Azure compute credit from a pool sponsored by Microsoft Research.
+ +
+

Tracks

+

Standard Hardware Tracks (T1 and T2)

+

+ There are two standard hardware tracks: +

    +
  • Track 1: In-memory indices with FAISS as the baseline. + Search would use Azure Standard_F32s_v2 VMs + with 32 vCPUs and 64GB RAM. Index construction would use Azure + Standard_F64s_v2 VM + with 64vCPUs, 128GB RAM and an additional 4TB of SSD to be used for storing the data, index and other intermediate data.
  • +
  • Track 2: Out-of-core indices with DiskANN as the baseline. + In addition to the limited DRAM in T1, index can use an SSD for search. + Search would use Azure + Standard_L8s_v2 VMs with 8 vCPUS, 64GB RAM and a local SSD Index constrained to 1TB. + Construction would use Azure + Standard_F64s_v2 VM + with 64vCPU, 128GB RAM and an additional 4TB of SSD to be used for storing the data, index and other intermediate data.
  • +
+ Participants are expected to release their code for index building and search which the organizers will run on separate machines. + Participants provide a configuration for their index build code that would complete in 4 days for each dataset. + The protocol for evaluation is as follows: +
    +
  • [on indexing machine] participants will be given a local path with 1B vector dataset.
  • +
  • [on indexing machine] participants build an index from the 1B vectors and store back to local disk.
  • +
  • [on indexing machine] Stored index is copied out to a temporary cloud storage location by the eval framework.
  • +
  • [on search machine] organizers load the index from cloud storage to a local path and provide the path to the search code.
  • +
  • [on search machine] organizers perform searches with held-out query set and measure recall and time to process the queries with several sets of parameters.
  • +
+
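As a rough illustration of the search-machine step above, the Python sketch below times a parameter sweep and reports throughput next to a plain recall@k. The wrapper methods (set_query_arguments, query) and the recall computation are assumptions for illustration only; the actual protocol and interfaces are defined by the evaluation framework.

    import time
    import numpy as np

    def sweep(index, queries, gt_ids, parameter_sets, k=10):
        """Illustrative only: for each search-parameter setting, time the full query set
        and record throughput (queries/second) alongside a plain recall@k."""
        measurements = []
        for params in parameter_sets:                # e.g. [{"Ls": 64}, {"Ls": 128}]
            index.set_query_arguments(**params)      # assumed wrapper API, not the official one
            start = time.time()
            ids = np.asarray(index.query(queries, k))    # assumed to return (num_queries, k) IDs
            qps = len(queries) / (time.time() - start)
            recall = np.mean([len(set(res) & set(gt[:k])) / k for res, gt in zip(ids, gt_ids)])
            measurements.append((params, qps, recall))
        return measurements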

+ + Finalized details for build and search hardware timing will be released along with the eval framework. + +

Custom Hardware Track (T3)

+

+ Participants can use non-standard hardware such as GPUs, AI accelerators, FPGAs, and custom in-memory silicon. In this track, participants will either 1) send their hardware, such as PCIe boards, to GSI Technology, or 2) run the evaluation themselves using the scripts made available by the organizers. For T3 participants sending hardware, we will make specific delivery arrangements at the participant's expense. We will install the hardware on a system under the organizers' control (we have a few bare-metal options available) and follow any installation directions provided. Participants will be allowed to temporarily log into the machine to finalize any installation and configuration, or to debug the installation as needed. For T3 participants running the evaluation themselves, we request remote ssh access and sudo accounts on the systems so that the organizers can verify the system and hardware (such as IPMI support and minimum resource availability, e.g., disk storage for the datasets). The evaluation phase will proceed like T1/T2, with a few modifications. +

    +
  • For participants that send their hardware, T3 organizers will provide remote access to a separate indexing machine. +
      +
    • [on separate indexing machine] participants download 1B vector dataset and store to local disk
    • +
    • [on separate indexing machine] participants build an index from the 1B vectors and store back to local disk
    • +
    • Stored index is copied to eval machine
    • +
    • [on eval machine] T3 organizers load the index from local disk
    • +
    • [on eval machine] T3 organizers query the index with the held-out query set and measure recall and time to process the queries with several sets of parameters. Index search code can use internal parallelism to batch process the queries.
    • +
    +
  • +
  • For participants that give us remote access to systems, participants are responsible for building their index. +
      +
    • [on indexing machine] participants download 1B vector dataset and store to local disk
    • +
    • [on indexing machine] participants build an index from the 1B vectors and store back to local disk
    • +
    • Stored index is copied to eval machine
    • +
    • [on eval machine] T3 organizers load the index from local disk
    • +
    • [on eval machine] T3 organizers perform searches with held-out query set and measure recall and search time with several sets of parameters.
    • +
    +
  • +
+ + T3 will maintain different leaderboards for each dataset based on the following benchmarks: +
    +
  • Recall vs throughput using the same ranking formula as the T1/T2 track
  • +
  • Power: recall vs throughput/watt, with a ranking formula similar to the T1/T2 track.
  • +
  • Cost: measured as queries/second/watt and MSRP/watt.
  • +
  • Total cost normalized across all tracks.
  • +
+ We will provide the exact details on how we collect and compute these benchmarks as well as additional machine and operating system specification before the competition begins. +

+
+ +
+

Benchmark Datasets

+ We intend to use the following six datasets of one billion points each. +
    +
  • BIGANN consists of SIFT descriptors extracted from images in a large image dataset.
  • +
  • Facebook SimSearchNet++ is a new dataset released by Facebook for this competition. It consists of features used for image copy detection for integrity purposes. The features are generated by the Facebook SimSearchNet++ model.
  • +
  • Microsoft Turing-ANNS-1B is a new dataset being released by the Microsoft Turing team for this competition. + It consists of Bing queries encoded by Turing AGI v5 that trains Transformers to capture similarity of intent in + web search queries. An early version of the RNN-based AGI Encoder is described in a + SIGIR'19 paper and a blogpost.
  • +
  • Microsoft SPACEV-1B is a new web search related dataset + released by Microsoft Bing for this competition. + It consists of document and query vectors encoded by Microsoft SpaceV Superior model to capture generic intent representation.
  • +
  • Yandex DEEP-1B image descriptor dataset consisting of the projected + and normalized outputs from the last fully-connected layer of the GoogLeNet model, which was pretrained on the Imagenet classification task.
  • +
  • Yandex Text-to-Image-1B is a new cross-modal dataset (text and visual), where database and query vectors have different distributions in a shared representation space. The base set consists of image embeddings produced by the Se-ResNext-101 model, and queries are textual embeddings produced by a variant of the DSSM model. Since the distributions are different, a 50M sample of the query distribution is provided.
  • +
+ +

+ All datasets are in a common binary format that starts with an 8-byte header consisting of num_points (uint32) and num_dimensions (uint32), followed by num_points x num_dimensions x sizeof(type) bytes of data stored one vector after another. Data files have the suffixes .fbin, .u8bin, and .i8bin for float32, uint8, and int8 data respectively. Note that a different query set will be used for evaluation. The details of the datasets, along with links to the base, query, and sample sets and the ground truth nearest neighbors of the query set, are listed below. +
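As a minimal illustration of this layout, the sketch below reads such a file with NumPy; the helper name is ours, and the repository's benchmark/datasets.py should be treated as the canonical reader.

    import numpy as np

    def read_vectors(path, dtype):
        """Read a .fbin/.u8bin/.i8bin file: an 8-byte header (num_points, num_dimensions,
        both uint32) followed by num_points x num_dimensions values of the given dtype."""
        with open(path, "rb") as f:
            num_points, num_dims = np.fromfile(f, dtype=np.uint32, count=2)
            data = np.fromfile(f, dtype=dtype, count=int(num_points) * int(num_dims))
        return data.reshape(int(num_points), int(num_dims))

    # The dtype follows the suffix: .fbin -> np.float32, .u8bin -> np.uint8, .i8bin -> np.int8.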

+ +

+ The ground truth binary files for k-NN search consist of the following information: num_queries (uint32) and K (uint32), followed by num_queries x K x sizeof(uint32) bytes representing the IDs of the K nearest neighbors of the queries, followed by num_queries x K x sizeof(float) bytes representing the distances to the corresponding points. The distances help identify neighbors that are tied in distance. In the recall calculation, returning a neighbor that is not in the ground truth set but whose distance is tied with an entry in the ground truth is counted as a success. +
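A simplified sketch of a reader for this k-NN ground truth layout (the repository's benchmark/datasets.py exposes knn_result_read for the same purpose):

    import numpy as np

    def read_knn_ground_truth(path):
        """Read num_queries and K (uint32), then the (num_queries, K) neighbor IDs (uint32),
        then the matching (num_queries, K) distances (float32)."""
        with open(path, "rb") as f:
            num_queries, K = (int(x) for x in np.fromfile(f, dtype=np.uint32, count=2))
            ids = np.fromfile(f, dtype=np.uint32, count=num_queries * K).reshape(num_queries, K)
            dists = np.fromfile(f, dtype=np.float32, count=num_queries * K).reshape(num_queries, K)
        return ids, dists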

+

+ The ground truth binary files for range search consist of the following information: num_queries (int32), followed by the total number of results total_res (int32), followed by num_queries x sizeof(int32) bytes giving num_results_per_query for each query, followed by total_res x sizeof(int32) bytes giving the IDs of the neighbors of each query, one query after the other. +
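A matching sketch for the range-search ground truth layout described above (the canonical reader is range_result_read in benchmark/datasets.py; the helper below only follows the layout as stated in this section):

    import numpy as np

    def read_range_ground_truth(path):
        """Read num_queries and total_res (int32), then the per-query result counts (int32),
        then the concatenated neighbor IDs (int32) for all queries."""
        with open(path, "rb") as f:
            num_queries, total_res = (int(x) for x in np.fromfile(f, dtype=np.int32, count=2))
            counts = np.fromfile(f, dtype=np.int32, count=num_queries)
            ids = np.fromfile(f, dtype=np.int32, count=total_res)
        return counts, ids

    # The neighbors of query i are ids[counts[:i].sum() : counts[:i + 1].sum()].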

+

+ The ground truth files for the first 10M slice, the first 100M slice, and the complete 1B set of each dataset against the respective query set can be downloaded + here(10M), + here(100M), and + here(1B). +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Dataset Datatype Dimensions Distance Range/k-NN Base data Sample data Query data Ground truth Release terms
BIGANN uint8 128 L2 k-NN 1B points 100M base points 10K queries link CC0
Facebook SimSearchNet++* uint8 256 L2 Range 1B points N/A 100k queries link CC BY-NC
Microsoft Turing-ANNS* float32 100 L2 k-NN 1B points N/A 100K queries link link to terms
Microsoft SPACEV* int8 100 L2 k-NN 1B points 100M base points 29.3K queries link O-UDA
Yandex DEEP float32 96 L2 k-NN 1B points 350M base points 10K queries link CC BY 4.0
Yandex Text-to-Image* float32 200 inner-product k-NN 1B points 50M queries 100K queries link CC BY 4.0
+ * new datasets
+ We recommend using Axel for downloading BIGANN, Facebook-SSN++, Yandex DEEP1B and T2I datasets.
+ We recommend using AzCopy for downloading Microsoft datasets. +
+ +
+

Metrics

+ The competition will measure recall@10 of the algorithms on the 6 datasets using a private query set (unreleased) at a fixed query throughput. Track T1 measures recall at 10000 queries/second (on 32 vCPUs), T2 measures recall at 1500 queries/second, and T3 measures recall at 2000 queries/second. The primary metric for comparison in each track will be the sum of improvements in recall over the baseline at the target QPS over all datasets. Additionally, track T3 will also rank entries by power and cost per query. See this notebook for power and cost analysis. A team has to publish an algorithm and commit to benchmarking on at least 3 datasets to be considered for ranking. Recall regression on a dataset selected by a team will be counted as a negative score. The recall@10 (AP for the SSN++-1B dataset) of the baseline algorithms on each dataset for the public query set is listed below; a sketch of the tie-aware recall computation appears after the table and notes below. +
Track Algorithm Search Machine Target Queries/sec BIGANN-1B SSN++-1B Turing-ANNS-1B SPACEV-1B DEEP-1B Text-to-Image-1B
Track 1 FAISS-CPU Azure F32s_v2 32 vCPUs + 64GB RAM 10000 0.634 0.753 0.703 0.728 0.650 0.069
Track 2 DiskANN Azure L8s_v2 8 vCPUs + 64GB RAM + 1TB SSD 1500 0.949 0.16274 0.936 0.901 0.937 0.488
Track 3 FAISS-GPU NVIDIA V100 + 700GB RAM 2000 0.927 TBA 0.910 0.850 0.942 0.86
+
+ +
+ Baseline DiskANN indices for T2 can be downloaded using "azcopy copy 'https://comp21storage.blob.core.windows.net/publiccontainer/comp21/diskann-T2-baseline-indices' 'local_folder' --recursive". + Note that this would take some time as the indices are large. All indices were built using R and L parameters set to 100. + Search for T2 used 16 threads and beamwidth 4. The Ls parameter was varied to tune recall vs QPS.
+ Update: T2 baseline results have been modified after measuring via the pybind11 interface on Docker. There was a 30-40% QPS loss using this interface compared to direct measurements of the C++ code from the command line. As a result, the QPS target has been lowered, and the recall is reported at this threshold. + +
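For reference, a minimal sketch of the tie-aware recall@k described in the ground-truth section above; the eps handling is an assumption for illustration, and the evaluation framework's own implementation is authoritative.

    import numpy as np

    def recall_at_k(gt_ids, gt_dists, result_ids, k=10, eps=1e-6):
        """Tie-aware recall@k: a returned ID counts as a hit if it appears among the
        ground-truth entries whose distance ties the k-th ground-truth distance."""
        hits = 0
        for ids, dists, res in zip(gt_ids, gt_dists, result_ids):
            threshold = dists[k - 1] + eps               # distances tied with the k-th neighbor
            accepted = set(ids[dists <= threshold])      # ground-truth IDs within the threshold
            hits += len(accepted.intersection(res[:k]))
        return hits / (k * len(gt_ids))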
+

Call for Participation and Timeline

+

+ Participation is open to all teams interested in developing new algorithms or re-implementing + existing algorithms more efficiently either in software or hardware. Participants are + requested to submit a brief document through CMT + for each track they will be competing in. The document should contain the following details: +

    +
  • Name, email and affiliation of each participant in the team
  • +
  • A name and/or URL for the submission.
  • +
  • [Optional] To receive Azure credits for developing new ideas, please submit your request + by June 30th with preliminary data on smaller scale datasets and why you think + your algorithm will work well at billion scale. This will be used by the organizers to select strong + entries. We request teams who already have access to infrastructure (e.g. those from industry or + with access to large university clusters) to skip this.
  • +
+ + + For Track T3, the document should contain the following additional details to help organizers plan and assess eligibility for separate leaderboards: +
    +
  • Type of hardware, e.g., PCIe extension board, rack-mounted system, or other.
  • +
  • Evidence of the retail MSRP of the hardware, i.e., pricing on website or copy of the customer invoice.
  • +
  • Whether hardware will be sent to GSI Technology (at the participant's expense) or whether organizers will be given remote access to the systems. For participants providing remote system access, whether their system supports standard IPMI power monitoring. If not IPMI, then an equivalent power monitoring interface must be available. +
  • Operating system requirements.
  • +
  • Whether the participant requires a separate machine for index building. We have limited Azure-based + Fsv2-series machines and some bare-metal machines managed by the T3 organizers.
  • +
+

+ +

Consent Forms

+ Please review and complete the consent form for participation in Tracks T1/T2 + and Track T3. Note that there are separate consent forms + for the standard and custom hardware tracks. Completing the form is necessary for participation. + +

+

Timeline (subject to change)

+
    +
  • May: release of data, guidelines, and a call for participation. Registration open.
  • +
  • June: Baseline results, testing infrastructure and final ranking metrics released.
  • +
  • July 11th: Participants in need of compute resources to submit an expression of interest.
  • +
  • Mid-July: Allocation of compute resources.
  • +
  • July 30th: Final deadline for participants to submit an expression of interest through CMT.
  • +
  • October 22nd: End of competition period. Teams to release code in containerized form and complete a pull request to the eval framework with code to run the algorithms.
  • +
  • October 29th: Participants submit a brief report outlining their algorithm and results.
  • +
  • Mid-November: Release of preliminary results on standardized machines. Review of code by organizers and participants. Participants can raise concerns about the evaluation.
  • +
  • Early December: Final results published, and competition results archived (the competition will go on if interest continues).
  • +
  • During NeurIPS, organizers will provide an overview of the competition and results. Organizers will also request the best entries + (including leaderboard toppers, or promising new approaches) to present an overview for further discussion.
  • +
+

+
+ + +
+

Summary of NeurIPS'21 event

+ The NeurIPS session for this competition happened on Dec 8, 2021. See slides and recordings of the talks below. Overview Talk and Break-out session schedule (GMT). +
    +
  • 11:05-11:25: Overview Talk (slides, video)
  • +
  • 12:00-12:45: Overview of results presented by organizers, followed by Q&A
  • +
      +
    • Standard hardware tracks T1 and T2 results (slides)
    • +
    • Custom hardware track T3 results (slides)
    • +
    +
  • 12:45-13:20: Invited talk 1 by Prof. Alexandr Andoni: Learning to Hash Robustly, with Guarantees (slides, video)
  • +
  • 13:20-13:55: Invited talk 2 by Prof. Anshumali Shrivastava: Iterative Repartitioning for Learning to Hash and the Power of k-Choices (slides, video)
  • +
  • 13:55-14:30: Talks from track winners. +
      +
    • Track 1: kst_ann_t1 Li Liu, Jin Yu, Guohao Dai, Wei Wu, Yu Qiao, Yu Wang, Lingzhi Liu, Kuaishou Technology and Tsinghua University (video)
    • +
    • Track 2: BBANN Xiaomeng Yi, Xiaofan Luan, Weizhi Xu, Qianya Cheng, Jigao Luo, Xiangyu Wang, Jiquan Long, Xiao Yan, Zheng Bian, Jiarui Luo, Shengjun Li, Chengming Li, Zilliz and Southern University of Science and Technology (slides, video)
    • +
    • Track 3: OptaNNe Sourabh Dongaonkar, Mark Hildebrand, Mariano Tepper, Cecilia Aguerrebere, Ted Willke, Jawad Khan, Intel Corporation, Intel Labs and UC Davis (slides, video)
    • +
    +
  • +
  • 14:30-15:00: Open discussion on competition and future directions (github thread, video)
  • +
+

+ Abstract for Invited talk: "Learning to Hash Robustly, with Guarantees"
+ There is a gap between the high-dimensional nearest neighbor search + (NNS) algorithms achieving the best worst-case guarantees and the + top-performing ones in practice. The former are based on indexing via + the randomized Locality Sensitive Hashing (LSH), and its + derivatives. The latter "learn" the best indexing method in order to + speed-up NNS, crucially adapting to the structure of the given + dataset. Alas, the latter also almost always come at the cost of + losing the guarantees of either correctness or robust performance on + adversarial queries (or apply to datasets with an assumed extra + structure/model). + + How can we bridge these two perspectives and bring the best of both + worlds? As a step in this direction, we will talk about an NNS algorithm + that has worst-case guarantees essentially matching that of + theoretical algorithms, while optimizing the hashing to the structure + of the dataset (think instance-optimal algorithms) for performance on + the minimum-performing query. We will discuss the algorithm's ability + to optimize for a given dataset from both theoretical and practical + perspective. +

+ +

+ Abstract for Invited talk: "Iterative Repartitioning for Learning to Hash and the Power of k-Choices"
+ Dense embedding models are commonly deployed in commercial + search engines, wherein all the vectors are pre-computed, and + near-neighbor search (NNS) is performed with the query vector to find + relevant documents. However, the bottleneck of indexing a large number + of dense vectors and performing an NNS hurts the query time and + accuracy of these models. In this talk, we argue that high-dimensional + and ultra-sparse embedding is a significantly superior alternative to + dense low-dimensional embedding for both query efficiency and + accuracy. Extreme sparsity eliminates the need for NNS by replacing + them with simple lookups, while its high dimensionality ensures that + the embeddings are informative even when sparse. However, learning + extremely high dimensional embeddings leads to blow-up in the model + size. To make the training feasible, we propose a partitioning + algorithm that learns such high-dimensional embeddings across multiple + GPUs without any communication. We theoretically prove that our way of + one-sided learning is equivalent to learning both query and label + embeddings. We call our novel system designed on sparse embeddings as + IRLI (pronounced `early'), which iteratively partitions the items by + learning the relevant buckets directly from the query-item relevance + data. Furthermore, IRLI employs a superior power-of-k-choices based + load balancing strategy. We mathematically show that IRLI retrieves + the correct item with high probability under very natural assumptions + and provides superior load balancing. IRLI surpasses the best + baseline's precision on multi-label classification while being 5x + faster on inference. For near-neighbor search tasks, the same method + outperforms the state-of-the-art Learned Hashing approach NeuralLSH by + requiring only ~ {1/6}^th of the candidates for the same recall. IRLI + is both data and model parallel, making it ideal for distributed GPU + implementation. We demonstrate this advantage by indexing 100 million + dense vectors and surpassing the popular FAISS library by >10%. +

+
+ + + +
+ + diff --git a/neurips23.html b/neurips23.html new file mode 100644 index 000000000..0265b2f76 --- /dev/null +++ b/neurips23.html @@ -0,0 +1,630 @@ + + + + + + + + + + + + NeurIPS'23 Competition Track: Big-ANN + + + + + + + + + + +
+ +
+ + +
+ +
+
+
+ + +

+ NeurIPS'23 Competition Track: + Big-ANN +

+
+ +

+ Supported by + + + + +

+ +
+

+ New: the latest ongoing leaderboard has been released (March 1st, 2024).
Top entries:
+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Filter track | OOD track | Sparse track
Rank Algorithm QPS@90% recall | Rank Algorithm QPS@90% recall | Rank Algorithm QPS@90% recall
1 Pinecone-filter 85,491 | 1 Pinecone-ood 38,088 | 1 Zilliz 10,749
2 Zilliz 84,596 | 2 Zilliz 33,241 | 2 Pinecone_smips 10,440
3 ParlayANN IVF2 37,902 | 3 RoarANN 22,555 | 3 PyANNS 8,732
4 Puck 19,193 | 4 PyANNS 22,296 | 4 shnsw 7,137
... ... ... ... ... ... ... ... ...
Baseline FAISS 3,032 | Baseline Diskann 4,133 | Baseline Linscan 93
+

+ Note: entries by Pinecone and Zilliz are not open source. +
+

+

+
+ + +

+ This challenge is to encourage the development of indexing data structures and search algorithms for practical variants of the Approximate Nearest Neighbor (ANN) or vector search problem. These variants are increasingly relevant as vector search becomes commonplace. This challenge has four tracks covering sparse, filtered, out-of-distribution, and streaming variants of ANNS. These variants require adapted search algorithms and strategies with different tradeoffs. Participants are encouraged to develop and submit new algorithms that improve on the baselines for these variants. This competition aims to be accessible to participants by limiting the scale of the datasets to about 10 million points. +

+ + + + + + + + + + + + + + +
+
+ + + +
+
+
+

+ Tracks: Datasets, Metrics and Baselines +

+ +

+ The evaluation hardware is normalized to Azure Standard D8lds v5 (8 vCPUs and 16GB DRAM). The index build time on this machine will be limited to 12 hours, except for the streaming track, which has stricter time limits. +

+

+ The challenge consists of 4 tracks with separate leaderboards, and participants can choose to submit entries to one or more tracks: +

+
+ +
    +
  • + Filtered Search: This task will use a random 10M slice of the YFCC 100M dataset transformed with CLIP embeddings. In addition, we associate with each image a "bag" of tags: words extracted from the description, the camera model, the year the picture was taken, and the country. The tags are from a vocabulary of 200,386 possible tags. The 100,000 queries consist of one image embedding and one or two tags that must appear in the database elements to be considered. +
  • +
  • + Out-Of-Distribution: This task will use the Yandex Text-to-Image 10M cross-modal dataset, where the database and query vectors have different distributions in the shared vector space. The base set is a 10M subset of the Yandex visual search database of 200-dimensional image embeddings, which are produced with the Se-ResNext-101 model. The query embeddings correspond to user-specified textual search queries. The text embeddings are extracted with a variant of the DSSM model. +
  • +
  • + Sparse: This task is based on the common MSMARCO passage retrieval dataset, which has 8,841,823 text passages encoded into sparse vectors using the SPLADE model. The vectors have a large dimension (about 30,000), but each vector in the base dataset has an average of approximately 120 nonzero elements. The query set contains 6,980 text queries, embedded by the same SPLADE model. The average number of nonzero elements in the query set is approximately 49 (since text queries are generally shorter). Given a sparse query vector, the index should return the top-k results according to the maximal inner product between the vectors; a brute-force reference sketch of this scoring appears after this list. +
  • +
  • + Streaming Search: This task uses a 30M slice of the MS Turing dataset released in the previous challenge. The index starts with zero points and must implement the provided "runbook" -- a sequence of insertion, deletion, and search operations (roughly a 4:4:1 ratio) -- within a time bound of 1 hour and 8GB of DRAM. The intention is for the algorithm to process the operations and maintain a compact index over the active points, rather than index the entire anticipated set of points and use tombstones or flags to mark active elements. More details to come. The runbook is provided in `final_runbook.yaml`, which is generated with `final_runbook_gen.py`. +
  • +
+ +
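For the sparse track, a brute-force reference sketch of the top-k maximal-inner-product scoring using SciPy sparse matrices; it is illustrative only, exact but nowhere near the throughput of the Linscan baseline, and the function name is ours.

    import numpy as np
    from scipy.sparse import csr_matrix

    def sparse_top_k(base: csr_matrix, query: csr_matrix, k: int = 10):
        """Exact top-k by inner product for one sparse query row against a sparse base matrix."""
        scores = np.asarray((base @ query.T).todense()).ravel()   # dot product with every base vector
        candidates = np.argpartition(-scores, k)[:k]              # unordered top-k candidate IDs
        return candidates[np.argsort(-scores[candidates])]        # sorted by decreasing inner product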
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Track | Dataset | Dimensions | Data type | Baseline algo | QPS @ 90% recall | Release terms
Filtered | YFCC-10M + CLIP | 192 | uint8 | filter-FAISS | 3200 | CC BY 4.0
OOD | Text2Image-10M | 200 | float32 | diskann | 4882 | CC BY 4.0
Sparse | MS MARCO / SPLADE | ~30K | float32, sparse format | Linscan | 101 | MS-MARCO: Free NC; SPLADE: CC BY NC SA
Streaming | MSTuring-30M-clustered | 100 | float32 | fresh-diskann | 0.883 recall@10 (45 mins) | O-UDA
+
+ + + We recommend using Axel for downloading + non-Microsoft + datasets. We recommend using AzCopy for + downloading + Microsoft datasets. + +
+
+ + + + +
+
+
+

Track Winners and Presentations

+
+ +

Filtered Search

+
    +
  • + ParlayANN IVF2: + Fusing Classic and Spatial Inverted Indices for Fast Filtered ANNS + [slides] + Authors: Ben Landrum (UMD), Magdalen Dobson Manohar (CMU), Mazin Karjikar (UMD), Laxman Dhulipala (UMD) +
  • +
+ +

Out-Of-Distribution

+
    +
  • + RoarANN: Projected Bipartite Graph for Efficient Cross-Modal Approximate Nearest Neighbor Search + Authors: Meng Chen, Yue Chen, Rui Ma, Kai Zhang, Yuzheng Cai, Jiayang Shi, Yizhuo Chen, Weiguo Zheng. All authors from Fudan University. +
  • + +
  • + PyANNS + Authors: Zihao Wang, Shanghai Jiao Tong University* +
  • +
+ +

Sparse

+
    +
  • + PyANNS + Authors: Zihao Wang, Shanghai Jiao Tong University* +
  • +
  • + GrassRMA: GRAph-based Sparse Vector Search with Reducing Memory Accesses
    + Authors: Meng Chen, Yue Chen, Rui Ma, Kai Zhang, Yuzheng Cai, Jiayang Shi, Yizhuo Chen, Weiguo Zheng. All authors from Fudan University. +
  • +
+ +

Streaming Search

+
    +
  • + Puck: + Efficient Multi-level Index Structure for Approximate Nearest Neighbor Search in Practice + [slides] + Authors: Jie Yin, Ben Huang, Baidu. +
  • +
+

+ * Zihao Wang is also an employee of Zilliz. However, he declares that the PyANNs entry was created on his time off, without any involvement from Zilliz or any of the other organizers. This entry did not declare conflict with organizers before participating. + +

Organizer Presentations

+ + + +

Invited Talks

+ + +
+
+ + +
+
+

Participation

+ + +
+

Guidelines

+ +
    +
  • + To participate, please express + interest through the CMT portal. +
  • +
  • + To request cloud compute credits ($1000) towards development, please select the "Requesting cloud credit" field in your CMT entry and share a brief overview of the ideas you plan to develop with these credits. +
  • + +
  • + To get started, please see the + instructions in the README + file, and submit a Pull Request + corresponding to your algorithm(s). +
  • + +
  • + For questions and discussions, please + use the Github issues or the Discord channel. +
  • +
+
+ + +
+

Timeline (subject to change)

+ +
    +
  • + June: Baseline results, testing + infrastructure, CFP and final ranking metrics released. +
  • +
  • + August 30th (previously end of July): Suggested deadline for requesting allocation of cloud compute credits for development. Credits will be provided on an ongoing basis. +
  • +
  • + September 15th (previously August 30th): Final deadline for participants to submit an expression of interest through CMT. +
  • +
  • + October 30th: End of competition + period. Teams to release code in a containerized form, and + complete a pull request to the eval framework with code to run + the algorithms. +
  • +
  • + Mid-November: Release of preliminary + results on standardized machines. Review of code by organizers + and participants. Participants can raise concerns about the + evaluation. +
  • +
  • + Early December: Final results + published, and competition results archived (the competition + will go on if interest continues). +
  • +
  • + During NeurIPS: Organizers will + provide an overview of the competition and results. Organizers + will also request the best entries (including leaderboard + toppers, or promising new approaches) to present an overview for + further discussion. +
  • +
+
+
+
+ + +
+
+
+

Organizers and Dataset Contributors

+ +

+ Organizers can be reached at + big-ann-organizers@googlegroups.com. We thank + Microsoft Research, Meta, Pinecone, Yandex, and Zilliz + for help in preparing and organizing this competition. We thank + Microsoft for cloud credits towards running the competition, + and AWS and Pinecone for compute credits for participants. +

+
+ + + +

+ Supported by + + + +

+
+
+
+ + + + + + diff --git a/neurips23_slides/ANNS_for_recommendation_systems_Yury.pdf b/neurips23_slides/ANNS_for_recommendation_systems_Yury.pdf new file mode 100644 index 000000000..0335d9021 Binary files /dev/null and b/neurips23_slides/ANNS_for_recommendation_systems_Yury.pdf differ diff --git a/neurips23_slides/IVF_2_filter_Ben.pdf b/neurips23_slides/IVF_2_filter_Ben.pdf new file mode 100644 index 000000000..b3c7321e3 Binary files /dev/null and b/neurips23_slides/IVF_2_filter_Ben.pdf differ diff --git a/neurips23_slides/NVIDIA_Corey.pdf b/neurips23_slides/NVIDIA_Corey.pdf new file mode 100644 index 000000000..d834238e8 Binary files /dev/null and b/neurips23_slides/NVIDIA_Corey.pdf differ diff --git a/neurips23_slides/intro.pptx b/neurips23_slides/intro.pptx new file mode 100644 index 000000000..98b7909a7 Binary files /dev/null and b/neurips23_slides/intro.pptx differ diff --git a/neurips23_slides/streaming_puck_baidu.pptx b/neurips23_slides/streaming_puck_baidu.pptx new file mode 100644 index 000000000..7d77a2748 Binary files /dev/null and b/neurips23_slides/streaming_puck_baidu.pptx differ diff --git a/neurips23_slides/summary.pdf b/neurips23_slides/summary.pdf new file mode 100644 index 000000000..1864f5bf8 Binary files /dev/null and b/neurips23_slides/summary.pdf differ diff --git a/notebooks/check_1B_groundtruth.ipynb b/notebooks/check_1B_groundtruth.ipynb deleted file mode 100644 index 497f7dbd6..000000000 --- a/notebooks/check_1B_groundtruth.ipynb +++ /dev/null @@ -1,365 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "4011807d", - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "import numpy as np" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "45df07dc", - "metadata": {}, - "outputs": [], - "source": [ - "sys.path.append(\"..\")\n", - "from benchmark import datasets" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a4d10abc", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "0d64ca07", - "metadata": {}, - "outputs": [], - "source": [ - "# the ground-truth files on https://big-ann-benchmarks.com/\n", - "\n", - "\n", - "new_gt = {\n", - " 'bigann-1B': \"https://comp21storage.blob.core.windows.net/publiccontainer/comp21/bigann/public_query_gt100.bin\", \n", - " \"ssnpp-1B\": \"https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/FB_ssnpp_public_queries_GT.rangeres\",\n", - " 'msturing-1B': \"https://comp21storage.blob.core.windows.net/publiccontainer/comp21/MSFT-TURING-ANNS/query_gt100.bin\",\n", - " \"msspacev-1B\": \"https://comp21storage.blob.core.windows.net/publiccontainer/comp21/spacev1b/public_query_gt100.bin\", \n", - " \"deep-1B\": \"https://storage.yandexcloud.net/yandex-research/ann-datasets/deep_new_groundtruth.public.10K.bin\", \n", - " \"text2image-1B\": \"https://storage.yandexcloud.net/yandex-research/ann-datasets/t2i_new_groundtruth.public.100K.bin\",\n", - "}\n" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "4dd26410", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Dataset BigANNDataset in dimension 128, with distance euclidean, search_type knn, size: Q 10000 B 1000000000\n", - "Dataset SSNPPDataset in dimension 256, with distance euclidean, search_type range, size: Q 100000 B 1000000000\n", - "Dataset MSTuringANNS in dimension 100, with distance euclidean, search_type knn, size: Q 100000 B 1000000000\n", - "Dataset 
MSSPACEV1B in dimension 100, with distance euclidean, search_type knn, size: Q 29316 B 1000000000\n", - "Dataset Deep1BDataset in dimension 96, with distance euclidean, search_type knn, size: Q 10000 B 1000000000\n", - "Dataset Text2Image1B in dimension 200, with distance ip, search_type knn, size: Q 100000 B 1000000000\n" - ] - } - ], - "source": [ - "# get official GT file \n", - "\n", - "\n", - "for dsname in new_gt: \n", - " ds = datasets.DATASETS[dsname]()\n", - " print(ds)\n", - " \n", - " data = urllib.request.urlopen(new_gt[dsname]).read()\n", - " open(f\"/tmp/new_GT/{dsname}\", \"wb\").write(data)\n", - " \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 109, - "id": "c0c2545b", - "metadata": {}, - "outputs": [], - "source": [ - "def count_diff_1_result(Dref, Iref, Dnew, Inew, eps): \n", - " \"\"\" compare knn search results. Intended to normalize for: \n", - " - small variations of the distance measure (below eps)\n", - " - ordering of ties\n", - " \"\"\"\n", - " if not np.all(Dref == Dnew): \n", - " assert np.abs(Dref - Dnew).max() < eps\n", - " # attempt to do some normalization to merge nearby distances \n", - " Dref = np.floor(np.minimum(Dref, Dnew) / eps) * eps \n", - " \n", - " ndiff = 0\n", - " cur_d = -1e10\n", - " s_ref = set()\n", - " s_new = set()\n", - " for j in range(len(Iref)): \n", - " if Dref[j] != cur_d: \n", - " nd = len(s_ref ^ s_new)\n", - " ndiff += nd\n", - " if nd > 0: \n", - " pass\n", - " # print(i, cur_d, s_ref, s_new)\n", - " s_ref = set()\n", - " s_new = set()\n", - " cur_d = Dref[j]\n", - " s_ref.add(Iref[j])\n", - " s_new.add(Inew[j]) \n", - " return ndiff\n", - "\n", - "def compare_knn_res(Dref, Iref, Dnew, Inew): \n", - "\n", - " ndiff = 0\n", - " eps = Dref.max() * 1e-5\n", - " for i in range(len(Iref)):\n", - " \n", - " if np.all(Iref[i] == Inew[i]): \n", - " continue\n", - " \n", - " ndiff += count_diff_1_result(Dref[i], Iref[i], Dnew[i], Inew[i], eps)\n", - " \n", - "\n", - " return ndiff" - ] - }, - { - "cell_type": "code", - "execution_count": 110, - "id": "af4affa2", - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "bigann-1B Dataset BigANNDataset in dimension 128, with distance euclidean, search_type knn, size: Q 10000 B 1000000000\n", - "raw_diff=0.9899 % diff=0.0 %\n", - "(10000, 100) (10000, 100)\n", - "ssnpp-1B Dataset SSNPPDataset in dimension 256, with distance euclidean, search_type range, size: Q 100000 B 1000000000\n", - "(7706752,) (7706752,)\n", - "msturing-1B Dataset MSTuringANNS in dimension 100, with distance euclidean, search_type knn, size: Q 100000 B 1000000000\n", - "raw_diff=0.0195 % diff=0.00024 %\n", - "(100000, 100) (100000, 100)\n", - "msspacev-1B Dataset MSSPACEV1B in dimension 100, with distance euclidean, search_type knn, size: Q 29316 B 1000000000\n", - "raw_diff=24.181163869559285 % diff=0.0 %\n", - "(29316, 100) (29316, 100)\n", - "deep-1B Dataset Deep1BDataset in dimension 96, with distance euclidean, search_type knn, size: Q 10000 B 1000000000\n", - "raw_diff=0.1864 % diff=0.0002 %\n", - "(10000, 100) (10000, 100)\n", - "text2image-1B Dataset Text2Image1B in dimension 200, with distance ip, search_type knn, size: Q 100000 B 1000000000\n", - "raw_diff=0.04773 % diff=0.0 %\n", - "(100000, 100) (100000, 100)\n" - ] - } - ], - "source": [ - "# compare with what I computed \n", - "new_basedir = \"/checkpoint/matthijs/billion-scale-ann-benchmarks/GT_1B/\"\n", - "\n", - "for dsname in new_gt: \n", - " ds = 
datasets.DATASETS[dsname]()\n", - " print(dsname, ds)\n", - " if ds.search_type() == \"knn\": \n", - " Iref, Dref = datasets.knn_result_read(f\"/tmp/new_GT/{dsname}\")\n", - " Inew, Dnew = datasets.knn_result_read(f\"{new_basedir}/{dsname}\")\n", - " raw_ndiff = (Iref != Inew).sum()\n", - " ndiff = compare_knn_res(Dref, Iref, Dnew, Inew) \n", - " print(f\"raw_diff={100 * raw_ndiff/ Iref.size} % diff={100 * ndiff/ Iref.size} %\")\n", - " \n", - " else: \n", - " nres_ref, Iref, Dref = datasets.range_result_read(f\"/tmp/new_GT/{dsname}\")\n", - " nres_new, Inew, Dnew = datasets.range_result_read(f\"{new_basedir}/{dsname}\")\n", - " # does not make much sense to verify, they are computed simultaneously\n", - " \n", - " print(Iref.shape, Inew.shape)\n", - " \n", - " \n", - " " - ] - }, - { - "cell_type": "markdown", - "id": "8b100230", - "metadata": {}, - "source": [ - "# Check subsets -- range" - ] - }, - { - "cell_type": "markdown", - "id": "a069dc1d", - "metadata": {}, - "source": [ - "Make sure the 10M and 100M results are a subset of 1B" - ] - }, - { - "cell_type": "code", - "execution_count": 91, - "id": "fd4bebff", - "metadata": {}, - "outputs": [], - "source": [ - "dsname = \"ssnpp-1B\"" - ] - }, - { - "cell_type": "code", - "execution_count": 92, - "id": "1ba55156", - "metadata": {}, - "outputs": [], - "source": [ - "new_basedir = \"/checkpoint/matthijs/billion-scale-ann-benchmarks/GT_1B/\"\n", - "\n", - "nres_ref, Iref, Dref = datasets.range_result_read(f\"/tmp/new_GT/{dsname}\")\n", - "nres_new, Inew, Dnew = datasets.range_result_read(f\"{new_basedir}/{dsname}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 98, - "id": "2f570eb7", - "metadata": {}, - "outputs": [], - "source": [ - "for nb, ss in [(10 ** 7, \"10M\"), (10 ** 8, \"100M\")]: \n", - " ds_sub = dsname.replace(\"1B\", ss)\n", - " nres_sub, Isub, Dsub = datasets.range_result_read(f\"/checkpoint/matthijs/billion-scale-ann-benchmarks/GT_{ss}/{ds_sub}\")\n", - " \n", - " nq = len(nres_ref)\n", - " assert len(nres_sub) == nq\n", - " i0 = j0 = 0\n", - " for i in range(nq): \n", - " i1 = i0 + nres_ref[i]\n", - " j1 = j0 + nres_sub[i]\n", - "\n", - " ref_res = Iref[i0:i1]\n", - " sub_res = Isub[j0:j1]\n", - "\n", - " ref_res_sub = ref_res[ref_res < nb]\n", - " assert set(ref_res_sub) == set(sub_res)\n", - "\n", - " i0 = i1\n", - " j0 = j1\n", - " " - ] - }, - { - "cell_type": "markdown", - "id": "eed490b8", - "metadata": {}, - "source": [ - "# Check subsets -- knn" - ] - }, - { - "cell_type": "markdown", - "id": "b4b5f7a9", - "metadata": {}, - "source": [ - "Make sure the 10M and 100M results are a subset of 1B in knn sense " - ] - }, - { - "cell_type": "code", - "execution_count": 118, - "id": "7d846214", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "bigann-1B\n", - "10M diff=0.0 % (verif on 10175 / 1000000 = 1/98.3)\n", - "100M diff=0.0 % (verif on 99455 / 1000000 = 1/10.1)\n", - "msturing-1B\n", - "10M diff=0.0 % (verif on 99896 / 10000000 = 1/100.1)\n", - "100M diff=0.0 % (verif on 1000758 / 10000000 = 1/10.0)\n", - "msspacev-1B\n", - "10M diff=0.0 % (verif on 30801 / 2931600 = 1/95.2)\n", - "100M diff=0.0 % (verif on 293540 / 2931600 = 1/10.0)\n", - "deep-1B\n", - "10M diff=0.0 % (verif on 10285 / 1000000 = 1/97.2)\n", - "100M diff=0.0 % (verif on 100663 / 1000000 = 1/9.9)\n", - "text2image-1B\n", - "10M diff=0.0 % (verif on 99944 / 10000000 = 1/100.1)\n", - "100M diff=0.0 % (verif on 999862 / 10000000 = 1/10.0)\n" - ] - } - ], - "source": [ - "basedir = 
\"/checkpoint/matthijs/billion-scale-ann-benchmarks/GT\"\n", - "\n", - "for dsname in new_gt: \n", - " if dsname == \"ssnpp-1B\": \n", - " continue\n", - " print(dsname)\n", - " I1B, D1B = datasets.knn_result_read(f\"{basedir}_1B/{dsname}\")\n", - " nq = len(I1B)\n", - " ndiff = 0\n", - " eps = D1B.max() * 1e-5\n", - " \n", - " for nb, ss in [(10 ** 7, \"10M\"), (10 ** 8, \"100M\")]: \n", - " ds_sub = dsname.replace(\"1B\", ss)\n", - " Iss, Dss = datasets.knn_result_read(f\"{basedir}_{ss}/{ds_sub}\")\n", - " ndiff = 0\n", - " ltot = 0\n", - " \n", - " for i in range(nq): \n", - " ref_I = I1B[i][I1B[i] < nb]\n", - " ref_D = D1B[i][I1B[i] < nb]\n", - " \n", - " l = len(ref_I)\n", - " ndiff += count_diff_1_result(ref_D, ref_I, Dss[i, :l], Iss[i, :l], eps)\n", - " ltot += l\n", - " \n", - " print(f\"{ss} diff={100 * ndiff / ltot} % (verif on {ltot} / {I1B.size} = 1/{I1B.size/ltot:.1f})\")\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b704d902", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/compare_track1_1B_vs_2x500M.ipynb b/notebooks/compare_track1_1B_vs_2x500M.ipynb deleted file mode 100644 index 861626598..000000000 --- a/notebooks/compare_track1_1B_vs_2x500M.ipynb +++ /dev/null @@ -1,155 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "b07b0a31", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from matplotlib import pyplot" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "9342ed00", - "metadata": {}, - "outputs": [], - "source": [ - "%matplotlib inline\n", - "%config InlineBackend.figure_format='retina'" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "7e597ea8", - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "sys.path.append('../')\n", - "\n", - "from track1_baseline_faiss.parse_results import parse_result_file, find_latest_version" - ] - }, - { - "cell_type": "markdown", - "id": "702544bb", - "metadata": {}, - "source": [ - "# with run logs" - ] - }, - { - "cell_type": "markdown", - "id": "db0a4a75", - "metadata": {}, - "source": [ - "Here we compare the QPS for the single machine with 1B vectors setting and the 2 machines with 500M setting." 
- ] - }, - { - "cell_type": "code", - "execution_count": 54, - "id": "00be8b94", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "<base64-encoded matplotlib plot output omitted: QPS comparison of the single-machine 1B setting vs. the two-machine 2x500M setting>"
2rJlC1999RVt27blvffeq7P7FBcX88gjjwDOnEezZ89m+fLljBw5khtuuKFW1/j1119ZvXo1bdq0oXfv3od0/ylTpjBlyhTOOuus0rrnnnuOO+64g1tuuYXp06cDMGzYMI455hg+//xznnvuOeLi4kqP37x5M9OmTWPw4MH06dMHgAcffJCZM2eydetW/vrXvxIREVHpc1988cXs37+fGTNmMHz48NJ9u3bt4vjjj+e6665jy5YtBAUFsWrVKubNm8e5557LF198UeFabrebzMzMQ/rcIiJNxUmdW/DMJf249aMlWAtLt2Xw54+W8MrlA2pc9LJBBUVAx6FO8craDbuWeIauL3JWVC+oYuG4tI1O8T7MdwU4Ma03ydl2oJP41Dz00oAUc1atscecDz74YGnPVm/sqZizYSipKeJD3RIiuGZIEq/P2QzAI1PX8PCJfoT413MvyIgEiDgNup5WVleQDcmrKyY7k1c7iw8dqCATts51ipcrAFr28MzR6e3V2RuC63hieak/Dzah7+rBhv+jfs899zBjxgwmTpxI3759ef755wkMDOTjjz+utHLjkfjyyy/58ssvK9SFhIRw6aWXlgZRdaG4uJiHHnqoQt0VV1zBf//731rNn5Sens4VV1wBwNNPP42f36H13DnllFMqBJcAt956Ky+88AI//fQTW7dupUOHDgD88Y9/5NZbb+Wdd97h7rvvLj3+tddew1rLTTfdVOv7Tp06lY0bN/KXv/ylQnAJ0Lp1a+69917uuOMOpk+fXvrkHJzv4EAul4uYmJha31tEpKkZe2wr9mb34qEpqwGYtmYv//hyFY+d16dWw2l9IrIVRI6FHmOdbbcb9m1wFh/y9ubcswrcFecPxV3kJEN3LYGFrzt1QVHQ5riyRYjaDHDiaDkyijlrpJizIsWcijlroqSmiI/dPqobXy3fRXJWASnZBUze4M8fevhg6EtQBLQ/0SleJcWQus5JcHoTnXtWVD0PkbvIs38l8EFZfXQHz9D1Y8sWJopsreHr0uQYY3j33Xfp168fDzzwAACvvvpqnQZ9AG+99VbppO0lJSXs2LGDd955hwcffJAvv/ySRYsWER4efsT3CQ4OxlqLtZZdu3Yxbdo0HnjgAQYOHMh3331XYbj2gXJycjjnnHNYv3499957LxdffPEh3//A4A6cuZdOPvlkNm7cyNKlS0sDzCuvvJL777+f1157rTTALCoq4u233yYmJuaQ7j9//nyA0qfqB/LOr7RmzRrGjBlDr1696NevHx999BFbt25l3LhxnHzyyQwcOFCT1IvIUeGaIR1JzirglVkbAfhowXYSIoO5Y1Q3H7esllwuiO/mFO8UTUX5TsxafrX1tE2Vzy3IdOaZ3zSzrC6yLbQdUNajs1U/CDryv8siXoo5yyjmVMx5MEpqivhYeJA/fx3Tk9snLgPgx63FDG1T9YTKDc7PHxJ6OaXvJU6dtc5cReXn6Nyz0lmhsioZW52yZkpZXUhs2bD1Vn2d1xZdnfuJNGLx8fEMGzaMiRMn0qJFi9KnxvXFz8+PDh068M9//pN169bxwQcf8MILL5QGuHXBGEObNm246qqr6N69OyeddBK33norX3/9dZXH5+TkMHbsWObOnctdd93FE088cVj3TUiouqdLYmIiQIUhNhEREVx++eW88sorzJgxg5EjR/Lll1+yZ88e7rjjjkNamXPfvn0AfPrppzUet3//fsD5Dn766ScefvhhPvvsM+67777SNl111VX8+9//rpOAX0SkMbtvdHf2ZuUzaakzX+Wz09aTEBnMH05o7+OWHaaAYGh3vFO8ctM8w9aXOMPWdy6G3NTK52btgNU7YLWnh5txQXxPZ7V1b6KzZS/FtXJEFHMq5lTMWTv6TSvSCJzTtzUfLdjGL5vScFt4b3UBl51lG+ewHmMgqq1Tup9ZVp+XAcmrynpr7l7hLDrkLq58jbw02DzLKV7+wdCyZ8UenQnH6Ml3Q/LB8JqmZuLEiUycOJG4uDhSU1O57bbbeP311xvk3ieeeCIffPABCxYsqLd7DBo0iOjoaGbOnFnl/uzsbMaOHcucOXO49957Dzu4BEhOTq6yfs+ePQBERVUcmvbHP/6RV155hVdffZWRI0dWmKz9UHiv++WXX3LOOefU6pyYmBieeeYZnnnmGTZs2MCsWbN49dVXefHFF8nIyKjTeadERBojYwxPXHgsKfsLmLPeSfT97YuVxIcHMapXMxmOHRoLXUY5BZwH+RnbKi5CtGsZFOdVPM+6Ye9vTlnq+XvgHwKt+3mSnP2doevR7TVSyUsx50Ep5lTMqZizdpTUFGkEjDE8PK536aJBv6e7+Wr5Lsb1a+PrptVeSDQknewUr+ICSPm93NB1T6lqsvbifGci911Ly1UaaNG5LMnpTXhqLiPxgY0bN3LjjTcSHx/P4sWLufzyy/nf//7Hqaeeyvjx4+v9/unpzrQPbre73u6RnZ1NVlZWpcV1wHmKPXr0aH755Rf+9re/lU74frhmzZpVqa6kpIS5c525eo877rgK+4499liGDBnCF198wa+//sq0adMYNmwYPXv2rHQd71xLJSUllfYNGjQIgDlz5tQ6wCyvS5cudOnShUsvvZSWLVtWmotKRKS5CvBz8fLlA/jDa7+wcmcmbgu3frSED28YRP/2zXCuN2MgpoNTep/v1JUUw97V5RKdS5yH+PaAv83FebBtvlO8QuPKFiBq0x9a93cSqSIHUMypmBMUc9aWlnITaSS8iwZ5PTp1Ddn5RdWf0BT4BznzaR53OZz5BFzzDdy/DW5bBhe/B8PuhW6jIbK65K11Jnb/7QuY/hB8cAE81Q2e7ArvXwDTHoR1PzhP0kXqUWFhIZdccgn79+/nnXfeoV27dnz44Ye0aNGCG2+8kQ0bNtTr/dPT03nrrbcAGDFiRIV9Dz74IMaYKufqqcqyZcvIyMioVF9YWMitt96K2+1m7Nixle4/atQofvnlFx566KEjDi4Bfvrpp0rDjV588UU2btzIyJEjS+c2Ku+Pf/wjhYWFXHDBBVhrufnmm6u8dosWLQDYvn17pX3jxo2jc+fOvPTSS3zzzTdVnj9//nxyc3MBZ7XL3377rdIx6enpFBQUVDmZu4hIcxUe5M+bVx9P+9hQAPKL3Fz39kI2puz3ccsaiJ+/E9sOvAbGvQi3zIP7t8PVU+G0h6HXOGfOzarkpsL672HGo04c+38d4fnjYNKNzggnERRzNtWYc9u2bZX2KeZsGOqpKdKI3D6qG58u2EJGgWVvdgHPTVvP38/q5etm1S1jILajU3qVe2KVs69yj87U3ys/+QbI2QsbpjmFZ5yk6TkvakiP1NrkyZOZPHkyUDb0ZP78+aWTpUdGRvLoo4+WHn/vvfeyePFi7rrrLs4805l2oU2bNrz99tucffbZjB8/nnnz5lWYxPsvf/kLqanOED3vk+Ann3yS999/H4Bzzz2Xc889t8q2bdmyBSibtH3KlCns27eP448/vlJQ5X2K7u9fuz/pb7/9Nq+99hojRoygQ4cOREdHs2vXLn7
44Qf27NlD9+7d+c9//lPhnPPPP59FixbRuXNn3G53lcHsueeeS79+/WrVBoCzzz6b8847j/POO48uXbqwfPlyvvnmG2JjY/nvf/9b5TkXXXQRd955Jzt37iQuLo7zzz+/yuNOPfVUPv30Uy6//HLOOussQkJC6NChA1dccQUBAQFMmjSJM844g7FjxzJ48GD69etHaGgo27dvZ+HChWzatIndu3cTGhrK8uXLOe+88xgwYAC9e/emdevWpKSk8OWXX1JUVFQ635GIyNEiPiKId649gQtenkdaTiHpuUVc+cYCvrhlMC0jaz/fXLMRFF55tFL2HqcXZ+mK60udRYcOlLbJKau/gks/hk6VFzSRpu1gMWdcXBwTJkwoPV4xZ9OMOc8//3xGjRpFcHAw3bp1U8zZkLyrUKmUFeBBwB5Q9hzhNRf379/f1qcZM2bYGTNm1Os9pP499sGPtsN9X9sO931tOz0w1f6+J8vXTfKdwlxrdyyydtFb1k6509rXR1n7SKK1EyIrl4Vv+Lq1pRrrv8XVq1fb1atX+7oZjcKECRMO/B1fobRv395mZTn/9r766isL2IEDB9rCwsJK17rzzjstYG+77bYK9R06dKjxHhMmTKhw/FVXXVXlcREREfb444+3//d//2fz8vIq3f/cc8+1LpfL/v7777X67HPnzrXXXnut7dWrl42OjrZ+fn42JibGDhkyxD755JM2Jyen0jkH+yyAfeutt2p1/7feeqv0+ClTpthBgwbZ0NBQGxUVZc8///yDfo477rjDAvYvf/lLtccUFxfbu+++2yYlJVl/f38L2OHDh1c4Jjk52d533332mGOOsSEhITYsLMx26dLFXnDBBfa9996zRUVF1lprt2/fbh944AE7ePBgm5CQYAMDA22bNm3s6NGj7TfffFOrz9yY1Pb3QP/+/S2w2DaCuEyl8cSdjfXvmxyauvoel25Ltz3+/m1p3Dr62dk2K6/y30mx1paUWJuyztqlH1r79d3WvjrC2odaVIxl/5Vg7cYZh3TZxvpvUjFnmYPFnB06dLBZWVk2KytLMWctPktjjDkfeOAB27FjR8WcVWiIuNNYq2GbBzLGPAiMB0aUqy6x1qYcwTUX9+/fv//ixYuPsHXV806ye2A3cWlaZsyYweML8vk93XkSNqhTLB/dMKhxLhrkC+4SSNsMe5bD8o+dYTwAfoFw7ffOHEU+1lj/La5ZswagyvlgpKLs7GyAKuf5aUystcTHx3PKKafwySef+Lo5DWLEiBHMnj2b33//na5du1Z7XFP5DhtabX8PDBgwgCVLliyx1g5oiHZJ3aqvuLOx/n2TQ1OX3+OM3/dy/TuLKHE7/085pEsL3rr6BAL9NcvZQRUXOKusT7oBspxV5fEPhj9MhM4ja3WJxvpvUjHnoWkKMYtizupjTq+m8D02tIaIO/XXpnrF1to95cphJzRFDoUxhit6BeHncpKYv2xK46vlu3zcqkbE5QdxXaD3BXDxO5DQx6kvKYRProLcNN+2T6QBrVq1in379vHAAw/4uikNYsGCBcyaNYszzjijVsGlSGNnjHnQGGMPKHt83S6R2hrZvSWPn9+ndPvnDfv4y6fLcbvVceag/IMgaQhc/XXZ/PLF+fDReNgw3bdtEzmAYk5prJpkUtMYc6Ex5gVjzBxjTJYnAHz/IOe0Nca8aYzZZYwpMMZsMcY8a4ypbqm+TsaYncaYzcaYicaYTvXwUUSq1DbCxTWDk0q3H526hv0Fxb5rUGMVEAKXvAtBUc525jbnaXc9rtQn0pj06dMHa22lVRubm5dffpm///3vnHfeebhcLh566CFfN0mkLv0OtCpX+tR8uEjjctHAdvzl9G6l218t38W/v13jwxY1MbGdPIlNzwJDxfnw0R88c8eLNA6KOaWxaqoLBf0d6AvsB3YAPWo62BjTGZgHtAS+BNYCJwC3A6ONMUOstfvKnfIrcLXnuJae+80zxhxzwHEi9eb2UV35cvkuUrILPIsGreNvY+t30aCiEjcp2QXsyconOTOf5Kx89mQVkJyVT0p2AV0TwvnbmJ74+zWi5yGxneC8l2Hipc72hmkw+0kYoYmURZqLJ554gh07dtCpUyfee+89TjjhBF83SaQuFVtr1TtTmrQ/jexCclYB7/2yFYDX52wmITKY64f6vl9IRm4hq3ZmsXJnJqt2ZrI7M4/xx7fn4uPb+bppZbyJzXfOhsztUFIAH10K4z+ErqN83TqRo4ZizqanqSY178RJZm4AhgMzDnL8f3GSk7dZa1/wVhpjnvZc61GgdGkva+235U82xvwCbAKuAp6ug/aLHFREcAB/H9uT2ycuA+Ctn7dw0cB2dEs49Dk6rLVk5RU7ycqs/NKkpXc7OctJZKbuL6CmaXbnbkjl3flbeeDMHow9thWtokIO89PVsR5j4eQ7Ye4zzvbMf0PbAdBFQaBIc+BdmVOkoRljLsSJNfvhPFCPAD6w1l5ewzltgYeB0UALYDcwGXjIWptexSmdjDE7gUKcB+t/tdZuqsOPIVLvjDE8eM4xpGQX8N1vTo7+kalraBkZzDl9WzdYO9JzClm1K7M0gblyZybb0/IqHbd0ewa9WkfSu01Ug7XtoGI7OonNt892Rh+VFMDEP3gSm6f5unUiRwXFnE1Pk0xqWmtLk5gHWzzFM2z8dGAL8NIBuycANwJXGGPuttbmVHO//caY3wBNpiAN6py+rfnw1238ujmNYrfln1+uqrRoUGGxm73ZnmRlZkG5RGU+ezLLkpj5RXUzJLvEbXlk6hoembqG45NiOLtva87s3Yr4iKA6uf5hG/l3Z7L1LXMAC5/fADfNhuhG9BReRESaGo0OEqklP5fh2fH9uOKNX1m4xcnf3/3JMuZtSMXfz+AyTgE878HlMhgDBs+2p954jjWGCtveY0xpHeQUFPPbLqcn5o70ygnMqlgL09YkN66kJkBMkiexeZYnsVnojEa65APodrqvWyci0ug0yaTmITrF8/qDtbZCVsdam22M+Rkn6TkIqHJGZmNMME4Qe7AeoRhjqltmskd2dnbpCnX1wbvaVn3eQ+rfgd/j2a3dLNwCbussGnThs99T7Ib0fEt6gZvswrq7twEiAg0xwYboIOc1JsgQFmD4amMROUWWknI9ORduSWfhlnQmfPkbPVu4ODHRnwEJ/oQH+mal9oDW1zNw1yqCCtMhL42sN85j6XH/xroCGrQdjfXfYmhoKKGhoaXtk+qVlJQA6GfVhOk7rFpJSQm5ubkH/f2kn1spjQ4SOQTBAX7878rjufCVeazfu5+iEsvEhdt92qZAPxfdEyPo3SaKdcnZLN7qJFx/25Xl03ZVK6aDZyj6WZDhSWx+fBlc/B50H+3r1omINCpHQ1Kzu+d1XTX71+MkNbvhSWoaY/4DTAG24QSm/wDCgHfqtaUiVWgb4eK09v58v9VZKGhxcslhXSfID2KCDNGeRGVMsOuAbUNUkMHfVXVCclSHAPYXWhbvLWbB7mJW73PjzW9aYPU+N6v3FfLu6kKOifPjxEQ/+if4E+LfcAnOosBofj
vmPvot+xsuW0Jk9nq6bHiD9d1uPvjJIiLNnK1pfhGpkkYHiRy6qNAA3rn2BC54eR67M/Mb9N6Bfi56tHISmH08pVtCBIH+znzwa3ZnceZzcwBY3ViTmuBJbE51emxmbPUkNi+HS96D7mf6unUiIgfVUHHn0ZDU9I4pyKxmv7c+ulxdW+AjIA5IAX4BBllrtx7sZtbaAVXVG2MWR0RE9B8xYkQtmnx4vL0u6vMeUv+q+h4HDCpi+dOz2ZNVOTB0GYiPCCIhMpiEyGASI4NJjAr2bAeRGBlMQlQwEUH+B/0fsto4y/Oakl3Ad6t2M2XFbhZuSSudi7PEwoqUElaklBC/xfDutSfQs1XkEd+39kZASwvfPwBAm13f0mbQ+XDsxQ3Wgsb6b3Ht2rVYawkLC8PlakSLPTVC3l5qERGHPoetNA76Ditzu934+fkRGRl50Inv9XM7LA06OshzfIOOEGqsIxHk0DTU93h/fxcrUgLxzoDkts6DcOt5dbZt6Xb516qOpYpz3Bb8DLQJd5EU5aJNuAt/VzGwD3L3kboeUteXtanYbfE3UGxhZ0YeX/8ww2cjjGojqMc/6Lfs74Tk7wF3Ee6Jl/N79z+TEn8Sbr+gRvtv0js6KDMzUzFnLWh0SfOg77Eit9vdICOEjoak5sF4/4qVppGtteN91BaRKkUEB/DJTScxdeVuwoL8aBnhJC4TI4OJCw/0yWrk8RFBXHFSEleclMTuzDymrtjN1yt2s2x7RukxKdkF3PLBEr66dQgRwQ04BHzQH2H7r7B6srM95XZI6A0J9bt6fGMXFBREfn4+OTk5SliIHIVycpzOgUFBPp4DufnS6CCRcqKCDEPbNuwUQAfj7zK0jXCxJcvJtG7NcnNMnJ+PW1W9guB4lvV7pDSx6bLF9Fz7DN1/f5794Z1IDelEVkg7XIWtKQiKpTAwlqKACDC+TSS63W7cbjf5+fmEhob6tC0i4hv5+fmlvwvq09GQ1PT2xKxuFujIA46rM8aYCJxVMgEC6vvLlOatfYtQ/jiis6+bUaVWUSFcP7QT1w/txLZ9uXy9chcv/rSB3MISNqfm8MCklbzwh+PqpKdorRgD416E5N9g33ooynXmIhpwNUS2gah2ENUGwhPB72j4NeiIiIggPz+fPXucVUnDwsIwxjTc9yIiDc5ai7WWnJyc0n/7eqhRbxp0dBA0/AihxjoSQQ7N0f49Dk5fyZZftwFg4jowYkQXH7eoFk4aDG+PhfTNAKXTLEVmr698rCsAIhKdEtUOOo+EbqMhvGWDNTc1NZWUlBSysrIIDQ1VzHkQGl3SPOh7rBh3ZmVlERAQQJcuXYiLi6vxvCP5mR0N/zf/u+e1WzX7vXMWVfdU/UjcjTOHEgDJycn1cAuRxqV9i1BuGdGFNtEh3D5xGQBfr9jNiR1jueKkpIZrSFCEM+/Q66c4Sc20TfDjPyseY/wgopWT4GzZCzoOhaShDRr0NaTY2FhycnLIzc1lx44dvm5Oo+YdPuLn13h7b0jN9B1WLTQ0lNjYWF8342il0UEijUCfciuer9pZ5/1a6kdUG7j2e5jzH9g8G1LWVn+suwgytztlx0L4bRJgoO1AZz7O7mMgvofTCaCeKOY8NIpZmgd9j5U1RNx5NCQ1vXMSnW6McZWf48jTk3IIkIfzZLyuPQW85nn/XUJCQp96uIdIozSuXxt+3ZzGh54n4f/6eg392sXQp211nabrQcuecM4L8Pl1Ve+3JZC1wynbf4XFbzn18T3LEpxJJ0No80gAuFwu2rVrR1paGtnZ2RQUFGjhkGrk5uYCR/eT1qZO32EZYwxBQUFEREQQGxur+c3qj89GB4FGCInUVvmk5oodTSSpCRCRAGOedN7npsHOxWyZ+ynB+XtJDLOQvQeyd0N+VZ/JOgnOHQth+sMQ09FJbnY/E9qfVOcjlxRzHhrFLM2DvkdHQ8edzT6paa3daIz5AWcOoz8BL5Tb/RDOvEWvVrcK5RHeOxvIBjDGFOl/IuRo88+zerFsWward2dRWOLmlg8X8/WfhxIV0oDzK/W5EKLbw/YFkOlJYGbugMydkLO36nNS1jhlwWuAgcTekDQMOg6DDidBcAMmZuuYy+UiLi7uoEMAjnbeoXkHW0xFGi99h+IDvhwdBBohJFIr3tXQC4vd7EjPIy2nkNiwQF8369CExkLX09iy04mpE8tPJVCYC/v3QNZu2L0Mfv8Wts5zHuZ7pW+GX15ySnA0dD3dSXB2GQXBdbPAp2LO2lPM0jzoe/SNJpnUNMacC5zr2Uz0vJ5kjHnb8z7VWvuXcqfcAswDnjfGnAqsAU4ERuIEln+rp3bqibkc1YID/PjvZf0564W57C8oZntaHvd+tpxXLh/QsHPqtDvBKQcqyofsXZC+Bbb96gzn2bHQGbZTysKelU755SVn4vVW/ZyenB2HOU+3A8Ma6IOIiEgj5svRQaARQiK1EujvomerSJZ7FrdcuTOT4d3ifduouhQYCrGdnJI0BE76k9Ozc8M0WDsVNkyHwnIrDednwMpPnOIKcEYpeXtxRrfz2ccQEamNJpnUBPoBVx1Q18lTALYCpUlNT2/NgcDDwGhgDLAbeB54yFqbVk/t1BNzOeolxYXxfxceyy0fLAHg+9+SeevnLVx7ckcftwwICC4L+jqfAiMfcJ5ub/ckOLfMgZ1LKj7Ztm7YtcQpPz8HLn9oM8BJcCYNrTp5KiIizZ4vRwd57q8RQiK11KdNWVJzVXNLalYlNBaOvdgpxQWwZa7Tg/P3b51RTF7uItg0wynf3gOJfcoSnK361es8nCIih6NJJjWttQ8CDx7iOduBa+qjPTXQE3MRYEyfVlx1Ugfeme8s5vrYN2s4rn00x7WP8XHLqhAY6qwS2Xmks12QDVvnw5bZTqJz9wrKrfEA7mInCbr9V5j9JPgF0TeiKxnRfWBrILQZCP5NbEiTiIgATWd0kKetGiEkUkvHtokGnHnfV+zI8GVTGp5/EHQ51SljnnRGI/3+Lfw+FXYvr3isd7TSrCcgojV0H+0kOTsOc64jIuJjTTKp2VToiblImb+O7cnS7Rms2JFJsdty64dLmXrbyUSHNvKEX1AEdDvdKQB56c68RJtnw+Y5sPe3iseXFBCTsYqYjFXw1kcQEArtTvQMVx/uPOWu48nYRUSk3vSjaYwOAo0QEqm18gtXrmxKiwXVNWOg1bFOGXGfM+f8Ok8Pzs2zoaSw7NjsXbDoTacEhjujnLqPcebjDGvhu88gIkc1/Z+1iDSIIH8/Xrq0P2Ofn0NWfjE7M/K4+5PlvH7lQFyuJjSUJSQGeox1CkBOqjNMffMc5zX1gPUfinLLhvEABEY4iw15h6sn9gGXX8N+BhERqZUmNDoINEJIpNa6tgwnyN9FQbGbXZn5pO4vIC5cPQ+JagPHX++UgmzY+JOT4Fz3nfNg36twP6z5yinGBe0GOUPUu4+BuC6+a7+IHHWU1KxHGgYkUlG72FD+c1FfbnxvMQDT1+7l9TmbuGl4Zx+37AiExcEx5zkFIGs3q799jeiMlbTOX+8sQlReYTas/
8Ep4KyknjTUKR2HQnxPUK9uERE5RBohJFJ7/n4uerWOZOm2DMBZLGhk95a+bVRjExQBvcY5paQYdixwFhr6/RtI21R2nHXDtnlO+fEfENcN+l/pJEYDQnzXfhE5KijaqV93Azs9pY+GAYnA6cckcn25RYL+7/vfWbSlPkfjNbDIVuxNGM667rfC7cvhjpUw7r/Q9w8Q2bby8fmZsPZr+O4+eHkw/KcrfHIVrP+x4dsuIiIicpQ4to2GoNeanz90GAxnPAp/XgJ/WgijHnJ6aHLAiKvUdfDD3+H5/rD4bSchKiJST5TUrF9PAW08ZWVCQoKPmyPSONx3Zg+Oax8NQIlnfs19+wt826j6Et0ejrsMznsF7lzlBIJnPwe9L4SwKnoE5KbC6snwwYUw8TLI3FH5GBERkQMYYyKMMa2NMa3RCCGRg+rTNrr0/dJt6Vhrqz9YyhgD8d3g5Dvguu/hL+udB/g9znLmkvfK3gVTbof/ngi/fQH6nSQi9UBJzXpkrc221u6y1u4CNAxIxCPAz8WLl/YnOjQAgD1Z+dz5yXLc7mYeTBoDLTrDgKvhwjfgL+vgTwtgzH+g5zkQElvx+LVfw4snwLwX9ZRbREQORiOERA7BseUWC5rxewqnPDWL56atZ+u+HB+2qgkKj3ce4I//AO7dBGOfhvBynXn2bYBPr4bXRzpzdCp5LCJ1SFk2EfGJNtEhPHNxv9Lt2etS+O/MDb5rkC8YA/Hd4YQb4JL34J6NcPPPcNzlZccU5cAPf4PXRsCORT5rqoiINHoaISRyCDrHh9MqKrh0e3NqDs9MW8fwJ2dy/n9/5r35W0jLKazhClJJQAgcfx3cthROnQBBZYljdi+D986Dd8+BHYt91kQRaV6U1BQRnxnZoyV/HFG2SNDTP65j/sZ9PmyRj7lckNgbxr0E13wL8T3K9iWvhP+Ngq/vrLj6pIiICBohJHKo/FyG968/kYsGtCU8qOL6uUu2ZfCPL3/jhEencf07C/l6xS7yi0p81NImKDAMht4Fty+DIXeAf1nymM2z4X+nONMspfzuqxaKSDOhaKceaW4jkYO7+7RunJDkDLt2W7ht4lJSspvp/JqHosNguGmO85Tb37typIVFb8KLx8OKTzR8R0REROQIdI4P58mL+rLo76N48dLjGNWzJf6usoVvit2WaWv2cuuHSxn4yDTu+XQ58zakUtLcp0yqK6GxcNpDcNsyGHgtGL+yfWu/hv8Ogsm3VFxNXUTkECipWb80t5HIQfj7uXj+D8fRIiwQgJTsAm6fuFTBIoB/oPOU+0+/QNfTy+pzUmDSDc7wneTVvmufiIiISDMQHODHWce25n9XHc+Cv43iX+OOob9nUUuv/QXFfLp4B5f+71eGPP4T//5mDWv3ZPmmwU1NZCs46xm4dSH0vqCs3rph2QfwwkCY/CclN0XkkCmpWb80t5FILSRGBfPs+H4Yz4PxeRv38fz09b5tVGMSkwSXfgIXvwsRrcrqN8+Gl0+C9y+ETbPUc1NE5CimEUIidSM2LJArTkpi0i1DmHXPCO4c1Y2OcWEVjtmTlc+rszcx+tk5jH52Nq/M2sjKHZnszMgjr1DD1KvVojNc+CbcNBu6jCqrtyWw7H1PcvMW2LfRd20UkSbF/+CHyOGy1mYD2QDGGM1tJFKDoV3j+fPILjz/k7NY0PM/rWdgUgxDu8b7uGWNhDHQaxx0PgVmPAa/vuI83QbY8KNT2h4Pl7wPEYm+bauIiPjC3cAE74ZGCIkcuQ4twrh9VFduO7ULy3dkMnnpTr5avqvCAkJr92Tz+LdrK5wXHOAiNjSQmLBAYsMCiQkt/xpAdOgB9WEBBPn7HXj75qtVX7j8c9g6D2Y+DptnOfW2xOm5uXwi9B0PQ+92EqEiItVQUlNEGo3bR3Vj4ZZ05m/ah7Vwx8RlfHnrENpEh2CMOfgFjgZBETD639D3DzD7/2DN14Cnh+aOhTD1bhj/gU+bKCIiPvEU8Jrn/XcJCQl9fNkYkebEGEO/dtH0axfN38b2ZM76FL5YuosffttDQXHlXtH5RW52ZeazKzO/1vcIC/SrNgkaExZYmiSNCXWSoDGhgXX5EX2jw2C46quak5sDrobRjzvTMomIHEBJTRFpNPxchuf+0I8xz80ldX8B+3IKOfmJGQT4GaJCAogMCSA6JICoA0toYKW66FDnNTigmT71bnWs0ytz30aY/6KzgBA4k67vXAxtBvi2fSIi0qA0QkikYQT4uTilRwKn9EggO7+I739L5rtVu9mZkU96TiFpOYUUlhz69A85hSXkFOaxIz2v1ueE+ENEoKH1bz9XmwQtnySNCgnAz9UIOwrUlNxc9AaExcHIv/q2jSLSKCmpKSKNSsuIYJ7/Qz8u/9+veNcKKiqxpO4vJHV/Yc0nVyHQ31WW6CyX9Iwsl/g8MBka6XnfJIYBtejsTLyelwG/TXLqfnoUrpjk02aJiIiINHcRwQFcOKAtFw5oW1pnrSW3sIS0nELScwvLvRaRUWG7kPScItJyC0nPKaT4MBbJzCuGvGLL3tyMWh1vDESFBFTo9RkbFkB8RBC9W0fRr300raJCDrkddaY0uTkfZj7mzB8PMOdpOOZ8aNnDd20TkUZJSc16ZIyJACI8m5qwXaSWBneO49Hz+vDiTxtI2V9AYRXDemqrsNhNSnYBKdkFh3xucIDLkwwNLE2Elk9+xoQG0KNVJL1bRxES6OME6IgHYPVkZ57NjdOdJ90dBvu2TSIiIiJHGWMMYUH+hAX50y42tFbnWGvJLigu7enpTYKm5xSWJj0rJkmLSM8tPOQ1Iq2FjNwiMnKLIDWnymMSI4M5rn00x7WPpl+7GPq08UGc2+EkuGIyvHmGM72Suwi+vgOu/gbUC11EylFSs35pwnaRw/SHE9rzhxPaA5BfVEJmXhGZeU4Q5n1fWnILK9d5SlHJ4a8Inl/kJr+ogOSsmhOifi5Dt4QI+rWLom/baIqy3bQOa+ChPfHd4NjxsPxDZ/unR+DqqaC5SEVEREQaNWMMkcEBRAYH0KFF2MFPAErclm+nzSS70NK1dz8n0VlDEjQtx4mXD2ZPVj7frtrDt6v2AE6c27NVBP3aRXNcuxiOax9Nx7iw+p/v3uUHZz8Hrw4DdzFsmw9L3oGB19TvfUWkSVFSs35pwnaROhAc4EdwgB8JkcGHdJ61lrxqEqJZ1SVIy5WSWg4DKnFb1uzOYs3uLD5asB2AQD/ot24+fdtF0bddNH3bRtM2pp4XPBp+L6z8xAn8tv4Mm2ZC55H1dz8REWk0NEJI5Oji5zKEBzplYFJsrc4pLnGTkVdUqUfotrRclm1PZ8WOTHILSyqcU+K2rNqZxaqdWbz/yzYAokMD6Ns22tOjM4Z+baOJCg2o889IwjEw+DaY+7Sz/eME6H4mRCTW/b1EpElSUrMeacJ2Ed8yxhAa6E9ooP8hzw9krSWnsMSTDC0sTYSWT5Duycpn5Y5MNqTsrzT8p7AEFmxJY8GWtNK6FmGBpQnOvp5enTFhdbiSY2xHOO4KWPyWs/3TI9BphHpriogcHTRC
SERq5O/nIi48iLjwoCr3F5e4Wb93P0u3ZbB0WzrLtmewfu/+Ssdl5BYxa10Ks9allNZ1ig8r7cnZr100PRIj8Perg///HX4v/PYFpG+Ggkz47n646O0jv66INAtKaoqIVMEYQ3iQP+FB/rSJrjkhmp1fxMqdmSzfnsny7Rks2JhMWn7lXp77cgr5ae1eflq7t7SuQ4tQT5Izmn7tojimddSRrdg+7B5Y9iGUFMDORbDuO+eJtoiINHcaISQiR8Tfz0XPVpH0bBXJpSc600Bl5hWxYkcGS7dlsGy7k+xMz608jH1TSg6bUnL4fMkOAEIC/OjTNsrpzdnO6dF5qKOuAAgIgbOfhXfHOdu/fQF9/wDdzjjcjykizYiSmiIiRygiOIDBneMY3DkOgJkzZ5Ke7yasXS+W78hwkp07MsjOL6507tZ9uWzdl8tXy3cBzlCi7gkRpUnOvu2i6doyAj9XLXtbRrWB46+DX/7rbP/0KHQ9Q5Oqi4g0cxohJCL1ISokgKFd4xnaNR5wRjNt3ZfL0u3pLNuWwdLtGazelVVp9fa8ohIWbE5jweayUUuto4Kd4ertnKHrvdvU8mF+pxFOInP5R8721LuhwxAICq+rjykiTZSSmiIi9SAm2MWIYxI5/Rhnzh+327J5Xw7Lt2ewfHsGy3ZksmZXFoUlFec8K3FbVu/OYvXuLD5a4NSFBvrRu00U/coNXW8TXcP8nCffCYvfhqJcSF7pBIB9/6DEpoiIiIgcEWMMSXFhJMWFcd5xbQFnUc9VOzM9PTmd3py7MvMrnbsrM59dK3czdeVuAPxdhp6tIktXWz+uXQwdWoRWHeOe/iis+x7y0iBzO7x/AfQdD93HQERCvX5mEWm8lNQUEWkALpehc3w4nePDOb+/EwAWFJfw+55sJ8np6c25oYp5i3ILKz/pjgsPLB227szTGUV0qGd+zvCWcOJNMPcZZ/vLW5z5hxKPhdb9oFU/5zW2sxKdIiIiInJEggP8GJgUW2HBouSsfCfBuT2dpdsyWLkjk7yiiosQFbstK3dmsnJnJu/O3wpATGiApyenMz/nsW2jiQoJgLAWMPrf8MVNzsnbf3HK13dC2+OhxxjocRbEdW2wzy0ivqekpoiIjwT5+3FsWydYu+Ikpy4rv4hVOzJZtiPD06szkz1ZlZ90p+4vZPravUwvNz9nUovQ0oWI+re/ir5B/8MUZDs7C7Jg61yneAVGQKtjy5KcrfpCiy7gOoI5PUVERETkqJcQGczo3omM7u2MWioucfN7cranJ6eT7NyUklPpvPTcImb8nsKM38sWIerSMpzj2kXTr90gzux1JbGr3y13hoUdC5wy7UGI6+b03uxxFrQZoAf4Is2ckpr1yBgTAUR4NgPcbndNh4uIEBkcwOAucQzuEldatycz3zM3ZwbLd2SwYnsm2QWV5+fcsi+XLfty+XKZMz/nEL87uDP0G3rYjYQXZ1S+WWE2bP3ZKV6B4ZDYp1yis5/zxFuJThERERE5TP5+Lo5p7SyKefmgDgBk5haxbEfZSutLt2WQmVd5EaINe/ezYe9+Pl28g78xmq6Bx3N59G+MsAton70MQ7n/z05d55Sfn4XwBGfBzB5nQcdh4F/1qu8i0nQpqVm/7gYmeDeSk5N92BQRaaoSo4JJjErkjHLzc25KzSlNci7fnsHq3VkUlVScoP3nkp78nN0TsLQijT6uTfQP2MqJwdvpVrKBsOL0yjcr3A/b5jvFKyDMSXSWH7oe102JThERERE5bFGhAQzvFs/wbmWLEG1OzSlbaX17Omt2Z1NywCJE6wtbMGHvMGAY0WRzimsp44KXMsguJ8iWG+G0P9mZZ37x286D+y6jnARn19MgJLqhPqaI1CMlNevXU8BrnvffJSQk9PFlY0SkeXC5DF1ahtOlZTgXDCibn3PN7uxyCxFllBvSY9hNC3a7W/BDwfFQAGBJJI0+rs0cH7SdE4O30aV4A2FF+yrfsCinbN4ir8AI6HY69DzHCQwDw+r7Y4uISA00QkhEmjpjDJ3iw+kUXxbj5hWWsHJnJss8c3Mu2ZZOclZB6TkZRDDJPYxJucMIopCTXSs53bWY0/wWE2uyyy5euB9WT3aKyx+STnYSnN3HQFSbhv2gIlJnlNSsR9babCAbwBhT5NJ8HiJST4L8/ejXLpp+7aJL6zLzili5w1mAaNl2p6Rke4NAwx5asMfdgh/zBkKeU9uSdPq4NjE0bCcDA7fSqWgDoYUple5HYTas+twp/sHOk++e50C3M/TkW0TENzRCSESanZBAP07oGMsJHcsWIdqdmceybRks3e4MXV+xI5OCYjcFBDLdPYDp7gE8UOymv1nH6X6LOd21iCRXud+J7mLYNNMp3/wFWh8HPcY6Sc74HlDV6usi0igpqSki0kxFhQRwctc4Tu7qzM9prWVPVn7ZauvbM1i5M5P95ebn3EuMEwxmDyitiyedfn5bGBG5i/4BW0gqWEdIQblEZ3E+rP3aKa4A6DTcSXD2GAthZXODiohIvdIIIRE5KrSKCqFVnxDO7NMKgKISN7/vyWbptnTPIkQZbE7NYZHtwaLiHjzGpXQ1OzndtYjT/BbRz7Wp4gV3LXXKT49AbKeyBGfb4zXdkkgjp6SmiMhRwhjjBIFRIYzu7QSBzvyc+0uTnMt3ZLDmgPk5U4jhx5IYfkw/zlNj6Wm2cU7gYs4KWEi74q1lN3EXwYZpTvn6DugwBHqe7QSGGtojIlJvNEJIRI5WAX4uereJonebKK44yalLzyn0LELkGbG0LYCX8tvyUsm5JLKPUX5LON21iJNcqwkwJWUXS9sE815wSlh8uYWGhkNAsG8+oIhUS0lNEZGjmDM/ZwRdWkZwoWfuovyiEtbszvIkOZ1k56bUnHJnGdbYDqwp6MATBefTyezirIBFXBG1gvjs1WWHWTdsmeOUb++FNgOh1zlOL87Yjg37QUVERETkqBETFsjI7i0Z2b0lULbQpnel9SXbOvLhntMItzmMcC3jwrBlDDXLMYX7yy6SkwJL3nVKQBh0OdVJcHY7HUJifPTJRKQ8JTVFRKSC4AA/jmsfw3Hty4K1zNwiVuzMKB26vmx7Bqn7nfk5N9nWPF94Ds+nnMOoVgU80mMLiTt/gG2/AOVWq9y5yCk//tPpwXnJ+xAai4iIiIhIfSq/0OZFA9sBsHpXFuf992e+Kh7CV9lDuLhfHE/0z8D8/g2s/QZy9pZdoCgH1nzlFJe/E8v2OAt6jIGotj76VCKipKaIiBxUVGgAQ7vGM7RrPODMz7k7M58l29J5YfoGfk92VpectjuImck9uGn4GP58XiTBG76FNVNg82yw5Yb2bP0Zln0Ig2/1xccRERERkaNcr9aRPDzuGO77fCUAnyxLpX+nPow/+wwY+4zzMH7t17Dma0jbWHaiuxg2z3LKt/dAq77Q+VTofAq0OxH8A330iUSOPkpqiojIITPG0Do6hNbRIZzeK5FXZ23khZ82UFjipthteWnGRr5dGcZj55/LoCuvg9w0WPcdLHkPts1zLrJtvpKaIiIiIuIzFw9sx4L
N6Xy+ZAcA//zqN/q0jeKY1lHQ7gSnjHoIUtd5FsacCjsXV7zI7uVOmfu0M0w96WQnwdl5JMR102rqIvVISU0RETkigf4u/nxqV87s04oHJq1g4ZZ0ADal5jD+tV/4wwntuf/MHkT1u9R5kv3yYOfEtV/DE0nOkJ3Its5rVBuIageRbZz3Ea3AL8B3H05EREREmi1jDI+c25tVOzP5PTmbwmI3t3ywhCl/PpnI4ADvQRDf3SlD74as3fD7N06Cc/NsZ6FMr6IcWP+9U8CJaTuPdJKcHUdAWIuG/ogizZqSmvXIGBMBRHg2A9xuty+bIyJSr7q0DOfjG0/iwwXbePzbtewvKAbgowXbmL4mmYfH9WZ0rx7OxOp5TuKTvHSn7FlZ9UWNC8ITod3xMOpBiO3UMB9GRERERI4KIYF+/Pfy/pzzwlxyCkvYui+Xez9dwcuX98dU1csyshUcf51T8jOdxObGGbDxJ0jfXPHYrJ2w9H2nYJwH/AOvgf5XqQenSB1w+boBzdzdwE5P6ZOcnOzj5oiI1C+Xy3D5oA5Mu2s4o3omlNbvzS7g5vcXc/MHy8g49Ulo0RVcteiBad2QvQtWfwkvD4FfXgE9IBIRERGROtQ5PpzHLzi2dPu73/bw5s9bDn5icBT0PBvOehpuXwa3LYWxTzuLCAVFHXCwhd3LYMrt8N39imlF6oB6atavp4DXPO+/S0hI6OPLxoiINJTEqGBev3IA367awz+//K10pfTvftvDzxsj+euYSYwf2AaTkwKZOyFzu/MkO3OHU7zv95d7GFSUC9/d5yQ4x70ILTr76NOJiDQ+GiEkInJkzu7bmoVb0nh3/lYA/v3NGvq1i2ZAh5jaXyS2k1OOvw5KimHXEqcH58afYMeisoUzf30F8rPgnBfq4ZOIHD2U1KxH1tpsIBvAGFPkcqljrIgcPYwxjOnTiiGd43jsmzV8vGg7ANn5xTwwaSWTl+7k3+f3oVPbAdB2QNUXKS6AXUvh6zth72qnbts8p9fmqf+EE28Cl18DfSIRkUbtbmCCd0MjhEREDt3fxvZk+fYMlu/IpNhtufXDJUy9bSixYYexormff9liQyPuh7wMp5fm6snO/uUfQkEWrvircPtpxXSRw6Esm4iI1Kuo0ACeuPBYPrz+RDq0CC2t/3VzGqOfm8NLMzZQVFJNjyL/IGg/CG6cCcPuAeNJYBbnwfcPwFtjIHVD/X8IEZHG7ymgjaesTEhIOMjhIiJyoCB/P168tD9RIc40Sbsz87nj42W43fbILx4SDRe+Cf2vLKtb+zV9Vv4Lv+LcI7++yFFISU0REWkQg7vE8f0dw7h5eGf8XM7E6IXFbp78/nfOefFnVuzIqP5k/yA45e9ww0+Q0Lusfvsv8MoQmPciuEvq9wOIiDRi1tpsa+0ua+0uQCOEREQOU7vYUJ6+uG/p9ux1Kbw0o44eorv84OznYfBtpVUxGSvou/yfkJtWN/cQOYoo2hERkQYTHODH/Wf24Ms/DaF3m8jS+jW7szj3pZ95dOpqcguLq79A635wwwwYfj+4PDOoFOfDD3+DN0dD6vr6/QAiIiIi0uyd2jOBm4eXzd/+zLR1zNuQWjcXNwZO/xecWjpjCJHZ650RSFm76+YeIkcJJTVFRKTB9W4TxeRbhvDXMT0IDnD+FLktvD5nM2c8O5s561OqP9k/EEY+4CQ3y6+/tmOBM9fmz8+p16aIiIiIHJG/nN6NEzrGAk6cetvEpSRn5dfdDYbeBWOfwuKMYCJlDbx5BqRtqrt7iDRzSmqKiIhP+Pu5uHFYZ76/YxiDO7cord+elscVbyzg7k+Wk55TWP0FWh3rDEcf8deyXpslBfDjP+GN0yHl93r+BCIiIiLSXPn7uXjxD8cRF+4s4pO6v5A/f7SU4urmgj8cx1/Pmp534fbOG5+x1Rl9lLy67u4h0owpqSkiIj7VoUUYH1x/Iv934bGlk7IDfL5kB6OensVXy3dhbTWTs/sHwoj7nIWEEo8tq9+5CF4ZCnOfgZIahrOLiIiIiFSjZWQwz48/Ds908CzYnMZ/flhXp/fYmzCMVb3/Cv7BTsX+ZHjrTNi+sE7vI9IcKakpIiI+Z4zh4oHt+PGuYYw9tlVp/b6cQm77aCnXv7OIXRl51V8gsY/Ta3Pk38HlSYyWFMC0B+GN02Dvmvr9ACIiIiLSLA3uEsedo7qVbr8yayPT1yTX6T3SWgyEyydBkGfO+fwMeHccbJxRp/cRaW6U1BQRkUajZUQwL13an9evHEhiZHBp/fS1eznt6Vm8O38Lbnc1vTb9AmD4PXDTLGhVtmIlu5bAq8NgzlPqtSkiIiIih+xPI7swvFt86fZdnyxne1pu3d4kaQhcNQVCPdMyFeXAhxfDmil1ex+RZsTf1w0QERE50Gm9EhjUKZb/++533vtlKwA5hSX888vfmLx0J09ccCxdEyKqPjnhGLh+urNg0KwnoKTQKdMfxv3bV2SPfp790d3IKywhv6iEvKIS8gqd13zP+5VbiihwW5YU/u7sLyohr9Bd4fj84pLSayREBvOfi/rSLja0AX9KIiIiItIQXC7DM5f0Y+zzc9idmU9mXhG3fLCES09sj7XgthZrLRZwuz2vFqfOux/vcU6921O/eXMhFlhc+DtuG0x01/9yyZo/E1m4F0oKcX98JVM6/JVlcWPK3avsmtZa3G6wlF2TCvesWNezVSQ3De9EkL+fL3+kInVCSU0REWmUIoID+Ne5vTmnX2vu/3wFG1NyAFiyLYMxz8/hwgFtCfRzeZKO7kpJyvyigSS6/o+/lrxAbzYC4NqzjJC3RvJa8QW8WnIWxQf7M7huQ63aumVfLrdPXMqnNw/GzzvpkoiIiIg0G7Fhgbx4aX8ueXU+xW7Lyp2ZPDBpZd3dYGNZ3Pk2f+O9wH/TybUHF27GbX2E5Ru28lbJmUd8m29X7SE+Iog/nND+iK8l4msafi4iIo3a8UmxfHP7UG47tSsBfk7CsKjE8tGC7bwzfyufLNrBlOW7mLYmmbkbUlm8NZ3Vu7PYlJrDvOyWjMt/kMeLxlNgnQRmoCnhnoBPmBz4T3qYbXXWziXbMnhv/pY6u56IiIiINC4DOsTwwJie9X6fncRzUeEEfnN3KK37Z8B73On/GVDNVEyHYNXOzCO+hkhjoJ6aIiLS6AX5+3HXad0Y26cV932+gmXbM2p9bgl+vFJyDtPc/Xkq8FX6GqfXZm/XFqYE/Z3Pw8bzXcyl+AcEERLoR0iAi3179xDkZ+jWOYmQAD9CAv0IDvBz3h+w/eWynbw6exMA//f975zaM0HD0EVERESaqWuHJBEW6MeSbekYDC6Xs+ilAVzG4DKebYOz3zjD1w1l9S7jHGuArVu34jLQsWPHCue6jGFB0bu0WHk7iZnLALjdfxKndQxiUc97MS6/0nt6r1mhHS7n/sZTv2pnJq95YtbkrALf/PBE6piSmvXIGBMBeCd9C3C73b5sjohIk9c9MYLP/ziYH1cnsyl1P6EBnuRi+aRjoPMaXO59SIAfQf4uXPY6+OUl+OlRKCkggGLG57zP+I
jlcMEHEOM8DZ85Mx2AESO61dQcADq3DOOntXtZv3c/uYUl/PWLlbx77QkYo2HoIiIiIs2NMYbxJ7RnfB0N3545czcAI0Z0rfqAYd/CJ1fAhmkA9Nr+Eb1iDYx7CVy1H3zbJjqkNKm5Nzv/yBot0kgoqVm/7gYmeDeSk5N92BQRkebBz2UY3TvxMM/2hyG3Q7cz4ctbYMdCp3rPSpj8R7jmm0O+YpC/H09ceCwXvDwPa2HO+lQ+X7KTCwe0Pcw2ioiIiIh4BIbC+I/gixvhty+cuuUfQtLJcNxltb5MQmRQ6fs9mUpqSvOgOTXr11NAG09ZmZCQ4OPmiIgIAPHd4Nrv4fRHwXhWftz6M+xZdViX698+hmsGdyzd/tfXq0nJ1rAeEWk4xpgIY0xrY0xrNEJIRKR58Q+EC96AvpeW1f34D8hNq/UlWkYEl75P3V9AcYn+TkjTp6RmPbLWZltrd1lrdwFFrkPoGi4iIvXM5QeDb4Ve55TVLXrjsC/3lzO60TYmBIDMvCIe/Oq3I22hiMihuBvY6Sl9NEJIRKSZcfnB2P9ApGc0UO4++OlftT490N9Fi7BAANwWUvcX1kcrRRqUsmwiInJ0G3hd2fsVn0BB9mFdJjTQn8fPP7Z0e+rK3Xy3as+Rtk5EpLY0QkhEpLkLDIMznyjbXvQW7Fhc69MTIst6ayZnaQi6NH1KaoqIyNEt6WSI6+68L9wPKz4+7Eud3DWOi8rNpfmPL1eRmVt0pC0UETkojRASETlK9BgLXU/3bFiYeie4S2p1avl5NZXUlOZA0Y6IiBzdjIHjy/XWXPgmWHvYl/v72F7ERzgBY0p2AY99s+ZIWygiIiIi4jAGzvw/8Pf0uty9HBa9WatT1VNTmhslNUVERPqOh4BQ5/3e34jMWnvYl4oKDeBf444p3f540XZ+3pB6pC0UEREREXHEdoShd5dtT/8X7N970NMqJjW1qKU0fUpqioiIBEdBnwtLN9vs/PaILje6dytGH5NYuv3ApJXkFhYf0TVFREREREoNvg1iOzvvCzLhh38c9JTySc096qkpzYCSmiIiIlBhwaD4lJ8JKMw8oss9PO4YIoP9AdiWlsvTP6w7ouuJiIiIiJQKCIYxT5Ztr5gIW+bWeEpilObUlOZFSU0RERGA1v2gzQAAXLaYxD3Tj+hyLSOD+ftZvUq33/x5M0u3pR/RNUVERERESnU5FXqdW7Y99W4oqX6RypYRZT0192r4uTQDSmqKiIh4leut2XrXd+B2H9HlLhrQlpO7xAHgtnDlmwu49PVfePCr3/jg160s3JKm1dFFRERE5PCN/jcEhjvvU9bCL/+t9lANP5fmxt/XDRAREWk0ep8P3/8V8jMIyU+GjdOh62mHfTljDP8+vw+nPzObvKISsvOLmbdxH/M27qtwXMuIILomhNO1ZQTdEiLolhBO14QIokICjvQTiYiIiEhzFtkaRjwAP/zN2Z75OPS+AKLaVjq0RVgg/i5DsduSmVdEflEJwQF+DdxgkbqjpKaIiIhXQAj0uwx+ecnZXvjGESU1AdrFhvLv8/vwj8mryC6oerGgvdkF7M0u4OcNFZOdCZFBdEuI8CQ7nURn14RwIoOV7BQRERERjxNvgmUfwt7foCgXvrsfLnm/0mEul6FlRBC7Mp1emslZ+XRoEdbQrRWpM0pqioiIlDfw2rKk5vrvIWM7RLc7okuee1wbzu7bmu1puaxLzmb93v2sS85mXfJ+Nqbsp7C46mHuyVkFJGcVMGd9aoX6VlHBdE2IoFvLcKeHZ0IEXVuGE6Fkp4iIiMjRxy8Axj4Fb412ttdMgfU/VvlwPiEquFxSs0BJTWnSlNQUEREpL64LaTF9iU1fDtYNi9+GU/9xxJf1cxmS4sJIigvj9GPK6otL3GxLy2Vd8n7WJ2ezbq/zuiklh8KSqpOduzPz2Z2Zz+x1KRXqW3uTnZ5EZzdPsjMsSH/uRURERJq1Dic5I46WfeBsf/MXuOUXZyRSOQnlFgvSCujS1On/ckRERA6wq/WZTlITYMm7MPw+8A+sl3v5+7noFB9Op/hwRvdOLK0vLnGzNS3XSXQmOz071yfvZ1PqfopKbNXtzsxnV2Y+sw5IdraJDqFbQriT5PQkPbu0DCc0UGGAiIiISLNx2sOwdirkZ0D6Fpj7LIx8oMIhCZFBpe+V1JSmTv83IyIicoB9LU6gIDCWoMI0yNkLa792FhFqQP5+LjrHh9M5PpzRvcvqi0rcbN2XUyHRuS45m82pORS7q0527szIY2dGHjN+r5jsbBsT4kl0htOtZQR92kbRLSGiPj+WiIiIiNSXsDgYNQG+vtPZnvsMHHsxtOhcekhClHpqSvOhpKaIiMgBrMuP3a1OJ2nrRKdi0ZsNntSsToCfiy4tI+jSMoIxfVqV1heVuNmSWi7Zudfp4bk5NYeSapKdO9Lz2JGex09r95bW/eX0btx6Std6/xwiIiIiUg/6XwVL3oNdS6CkAL65By7/HIwBKg4/35NV4KtWHpS1lozcInZl5rEnM5+48CD6tov2dbOkkVFSswbGmL8CjwIvWWtv9XV7RESk4exqfTpJ2z4FWwJb5kDK7xDf3dfNqlaAn8uzOnoEYylLdhYWu9mcmuPp1ekZyr43m637cqtMdj4zbT1DusRxXPuYhmy+iIiIiNQFlx+c9TS8NhKwsHE6rPkKeo0DILER9NS01pKVX8yezHx2ZeaxOyOf3Zl57MrIZ0+Ws70rM4/8oorzyz9wZg9uGt65mqvK0UhJzWoYYwYBNwArfN0WERFpeIVBLaD7mc7Qc3B6a575hG8bdRgC/V10T4yge2LFYeUFxSVsSslhvWdhou9/28O65P2UuC13f7KcqbcNJSTw/9m77zi7qnL/45/nTO+TOumdFCAQCARCDaiAoqIIKl5UVEQElesPvdeCgl7r9YIVBWzYC0pRUECQ3gk1HUgvTPr0ftbvj7XPnDOTmckkc87sU77v12u9dt9nnbMJWXn2Ws/KC6nWIiIiInLQJhwFx14Mz/zUb//zczDzdCiq6JFTc3uKgpqNbZ28HgQpu4OVsQBmXSvb9rbQ1N51wPf95j9XMa6qmHMWTExBrSUTKajZBzOrAn4HfAT4csjVERGRsBx7cTyo+cLv4Q1fhsKycOuUJEX5ecwbX8m88ZUAvPuYyZz1vYdpau9i7c4mvn33Kq55+2H7uYuIJItGCImISFKdfhWsuB2adkDDVnjo23DG1xhbmTj8vBXnHBYMTR+MlvYutgXBya17gyBlnQ9exnpYNrR2JuUrlBbmMb6qmPauKJt2twDwmVteZExFESfMHJ2Uz5DMlnFBTTM7DzgVWAAcCVQAv3POXTjANZOArwJnAaOAbcDtwFecc3v6uOQm4C/OuX+bmYKaIiK5avqpMHIm7H4N2urh5b/Awg+GXauUmDyylKvfdhj/9Vc/QOHmx9fzxnk1nHSIGowiqaYRQiIiknQl1XDG1+C2j/ntJ34MR76PirHzKC3Mo7m9i9aOKPWtnVSVFAB+JM/rda3dPSx7Biv9+t7mjqRUryg/woTqE
sZXFTO+qoQJ1cWMqypmQlUJ46v9vsrifMyMuuYOzrvhcV7Z3khHl+Njv17KLR9fzNxxlUmpi2SujAtqAlfhg5mNwGZg7kAnm9lM4HFgLHAHsApYBFwBnGVmJzrndiWc/1FgFvD+lNReREQyRyQCx3wY7v2i337253D0B7oTrWeb84+ZxL0rXue+lX7ioM/+5UXu/s9Tuhu6IpJ8GiEkIiIpc8R74Llfw4bHfJ74u67EPvQPaiqLWbezCYDLfreUupYOtu1tZVdTe1I+tjAvwriqYsZXFScELn2gcny1D1xWlxYMuodoVWkBN394Ee+8/jG2N7TR0NbJRb94htsuP4HxVSVJqbNkpkwMan4aH8x8Fd9j84H9nP9jfEDzU865H8Z2mtl1wb2+Dlwa7JsDfAM42TmXnD/NIiKS2Ra8D/79P9DZCttehC3PwaSFYdcqJcyMb5w7n6XffZg9zR1sq2vlK39bznXvWRB21URCoRFCIiKS0czg7GvhhpMg2gkbH4cX/0hN5bTuoOZjr+7az016yo8YNZXFTAh6U3YHLKtLuntZjiwtJBJJbieAidUl3PyhRbz7xid8zs76Vi76xTPc8vHFVBbrBXyuyrigpnOuO4i5v6i+mc0AzgDWA9f3Onw1cAnwfjO70jnXBCwGRgPLEu6dB5xiZpcCZc65tiR8DRERyRSlI+Gwc+HF3/vtZ3+etUFNgLEVxXzjnfP5+O+eA+DW57dwxmE1nHX4+P1cKZKVNEJIREQy29h5sPhyeOz7fvveq1g09/c8uXbfUyMGNZXxIOX4yliwMr4cVV5EXpIDloN16IRKbrhwIRf98mk6o47VtQ187NdLufnDx1KUrwkuc1HGBTUP0OnB8l7nXDTxgHOuwcwewwc9jwfux79Ff7bXPX4JvILvwbnf3ptmtrSfQ3MbGhp48MEHB135A9XQ0ACQ0s+Q1NNzzHx6hpmv9zOsyFvAQnxQs+ulW3ii7M10FlT0d3nGKwEWT8jjia1+VsrP/uk52raUUlWUOcPu9edwaGK/n2iEkIiIZIFT/svnhq/fAs07+RR/ZNTbr6C9M9qdv3J8VTFjK4rIz4uEXdsBnXTIaP73vCP4f39+EYAn1u7is7e8xPfesyDpvUMl/aX8v1YzG2Nm7zCzd5rZuFR/Xi9zguWafo6/EixnAzjn9jrnliUWoAnYHWy7FNdXRETSUEPFbBrKZwCQF21n3Ov/DrlGqXfhvCJGBEHMhg745bI29NegpLtktzudcw84514ZTBtwECOEmvAjhMqCfYkjhDrNrBMfOL0s2C4aav1FREQAKCqHs77VvZn/3C/54NRdfPSUGbz1iAksnDqCCdUlaR/QjDn36El89sw53dt/e3Er/3vP6hBrJGEZck9NM1sAnAvc6px7odexjwA/BGKNsk4z+y/n3PeH+rmDVBUs6/o5HttfnawPdM71OSbRzJZWVFQcvWTJkmR91D5ivVFS+RmSenqOmU/PMPP1+Qwrr4C/XwHArL0PM+vC67J2wqCYymk7eP/PnwbghR1d7KiYxbuPmRxyrQZHfw6HpqIiPXsip3m7c9hHCImIiAzavLfBrDfBq/8CHNx1JVx8P0Qyc9j2ZUtmsnVvC797aiMANzz0GhOqi/nA4mnhVkyGVTKGn1+IzxPU4420mR0B3IDPSfko0ACcBlxnZs845x5PwmcPVexfo/2+fXfOLRmeqoiISFqbfz7c+yVoq4ddr8K6h2DGkrBrlVInHzKGDy6eyq+e2ADAV/++gsUzRjF5ZGnINZMcls7tzsGMEDoDP0LofufcXmBv4glm1j1CaDAfONxpj5TWITvoOWYPPcvsMJzPsXjUu1j02oNEXAdsfZ5Xf/dZNk96W8a+qD+9yrF8TB4v7PApk66+Yzk7Nr7Kwprhz7SoP48Hbyhpj5LRt/hEYKlzrrbX/k/gG5b/45w7xTl3Nr4hB0EuoWEQ64lZ1c/xyl7nJZWZVZjZBDObABREo9H9XiMiImmqsAyOvCC+/czPw6vLMPrcm+cxY7QfLdvY1slnbnmRaFTD0CU06dzuHPYRQiIiIgeitWQ8G6ae170967Wfc9xTH2Pmqz+nau9ycF0h1u7A5UWMjy8oYkaVD2054IYX23h1T2Z9Dzl4yQhfTwbu62P/G4FWoDtxg3PuUTP7Nz6H0HCIJVWY3c/xQ4Jlf2/Uh+pKfA4lAGpre7e/RUQkoxzzYXj6Rr++6i64+wtQNgpKR0HJSL+MlZIRkJf58/GVFObxf+8+kvN+8jhRB0+t280vHlvHxSfPCLtqkpvSud25P0kfITTcaY+U1iE76DlmDz3L7DDsz7HjePjJU7DbT39e0lrL5M1/Y/Lmv0F+CYyZAzWHwdhD/bLmMCgfOzx1O0jHHNfGu37yOOt3NdMRhetf7uKvHz+OGWPKh60O+vN48IaS9igZ/9oaDWxJ3GFmI4FpwEPOuZZe5y/Hv2UfDrEZKs8ws0hifiMzqwjq0QI8maLPvxa4KVi/u6amZn6KPkdERIbD2Lkw9STY8Kh/k/1k77lAeimuSghyxoKevZcJx9M0EHr0lBFctmQWP3rgVQD+957VnDhrNPPGV+7nSpGkS+d2Z6gjhERERAaloBguugv+9WVYfTe0Jwz97WyBbS/4kqh0NNQcCmMPiy/HzvUjmdLAqPIibv7QIt71k8fZ1dTOnuYOPvjLp7n14ycypkLz7mWzZPzLqQMY2WvfUcHyuT7Ob2KAN9TJ5Jx7zczuxQ8/uhyfPD7mK0AZcKNzrilFn9+Az+mEmXVEIpkxk5iIiAzg5E/7oOZgtNb5ErwJH5TiajjkTXDO9ZCfPo2wT73hEP69ajsrttXT3hnlvTc9yU8uPJoTZo4Ou2qSW9K23UnII4SCF/axrg5KeyQiIv2rnADv+hl0tsG6R2DVnfDKvVC/pe/zm3fCuod96WYwcnq8R2dsOXJGKJMPTRtdxs8vOpb33vQErR1RNu1u4aJfPs1vPnIcI8sKh70+MjySEdR8FT/kJ9EZ+AZkXz0gxwOvH+yHmdk7gHcEm+OC5WIzuzlY3+mc+0zCJZcBjwM/MLM3ACuB4/DJ49cAXzzYuoiISA6a9Ub42CP+DXbzbmje5ZctsfXY9h4OKpbSuhdevgXKxsBZ30xy5Q9eYX6E775nAe+4/jFaOrqoa+ngAz9/mq+943Deu2hK2NWT3DGs7c4DFPYIIaU9EhGRA5NfBIe80ReApl2wfTnUrkhYroSOvvqBOf/ifvdaHxTtvmcxHPFuePN3fK/QYbRgcjU/uuBoLvnNs0QdLN9az3k3PM5vPnIcE6tLhrUuMjySEdT8O3CVmd0E/Aj/dvpSfF6je/o4/3hg3RA+bwHwwV77ZgQFYAPQHdQMemseA3wVOAt4C7AN+AHwFefc7iHUZUB6Yy4ikqXGH+HLQKJd0LLXBzl7BzwTl4nHWvbSHQh98scw4zSYfcYAHzK85oyr4I+XHM/Fv36WHQ1tdEYdn7v1ZdbubOK/z5pLXiQzZ86UjDLc7c5BC3uEEEp7JCIiQ1U2Cqaf4ktMNAp7N0Dtcti+
Ir7c9Sq4PmIcna3w3K+howXO/emwz6z+xkNr+Na5R/Dft76Ec7B2RxPn/eRxfvORRcwae/C5GyU9JSOoeS3wfuAjQQGfCP0bzrn6xBPN7FBgLvDbg/0w59w1wDUHeM0m4EMH+5lDoDfmIiK5KpLnG4ZlowZ/TVcn/OlCWPNPv33H5XDZkwd2jxQ7cnI1d1x+Ihf/6llWbPN/zd/08FrW7mji++9dQFlR+uUElawyrO3OTBohpLRHIiKSEpGIH2Y+cjrMe2t8f0cr7Fzte3PWLgsCniugMRgg8fItMGYunPKZvu+bQu8+djKlRXl8+k8v0NHl2FbXynk3PMEvLzqWo6aMGPb6SOoMubXjnKsDTgB+CawCHgM+6Zy7qo/T3wK8CNw11M/NENcCE4Pyck1NTcjVERGRtJaX73Nplgd/XzRthzuvADdcKQEHZ0J1Cbdcupg3zov/vXbfylrOv+EJttX1nqdFJHlCaHcuwI8Q+iBwZrBvRsK+83rV7zXgGOBmfDDzSmAmfoTQYufcriHUZUBmVmFmE8xsAhohJCIiqVZQDOOPhAUXwJlfh/ffBleugoUJ/cke+AZs6Svldeq99YgJ/PKiRZQW+vyee5s7eN9Pn+KhNTtCqY+kRlJe4TrntjnnLnbOHeacO8U51+d0sM65/3POHeWcezEZn5vunHMNzrmtzrmtgN6Yi4jI/pWN8oHNmJV/hxf/GF59+lFWlM+N71/IJafM6N63Yls95/zoMV7ctDe8iknWG852p3PuGuecDVCm9XHNJufch5xz451zhc65qc65K1KZ8ihwJX5m+C3AfI0QEhGRYWcGb/kOTDnBb7suuP0yPyFRCE46ZDR/+OjxjCgtAKClo4uLf/UMd7zQz4RIknGSGmUzs1Fm1ntGShERETkQh7wJjvlwfPsfn4W9G8OrTz/yIsYX3jKPb79rPvlBPs3tDW28+8Yn+MfL20KunWQ7tTv3oRFCIiISvrwCeMePoaDUb+9YCQ9+K7TqHDm5mlsuPaF7oqCOLsd//ukFbn5sWFJuS4olJahpZueY2avAdmCHma0xs7cn496ZTMOARETkoJ3xNRgZ9IJsb4DbPu4Ttaeh9xw7hV9/ZBFVJf4teFtnlMt+9xzXP/AqLs2GzkvmU7uzbxohJCIiaWPkdHjTV+Pbj30PNi8NrTqzxpbzl48v5pCx5YDP7HTN31dw3b2r1VbNcENu7ZjZ8cBf8fmFLCizgL8Gx3KZhgGJiMjBKSyDd94EFvxVveFReLLPUbZp4YSZo7ntshOYPrqse9937lnNlX9+kbbOrhBrJtlE7U4REZEMccxHYNrJft1F4faP+8mFQjK+qoQ/f2wxR02p7t73g3+/ylW3L6MrqsBmpkrGK9z/F9znq0ANfmbIrwF5wbFcpmFAIiJy8CYfCycnzBh5/1ehdnl49dmPGWPKue2yE1g8Iz5b+63Pb+HCnz3F7qb2EGsmWUTtzn5ohJCIiKSVSATO+REUBC+8d66GB78ZapVGlBXyu4uP49TZY7r3/e6pjXzyD8/pJXyGSkZQczHwSJBIfYdzbrtz7svAI/jZKXOWhgGJiMiQnfpfMH6BX+9qh1s/Flqy9cGoLi3kVx9exHuOmdy975n1ezjn+kd5pbYhxJpJllC7s38aISQiIullxDQ4I2EY+uM/gE3PhFYdgNLCfH72wWM4Z8GE7n3/ePl1PnzzMzS2dYZYMzkYyYiyjQWe7GP/U8CYPvaLiIjIYOUVwLk3QX6x3659GR74Rrh12o/C/Ajfetd8vviWeZifP4hNu1s498eP8/CaHeFWTjKd2p390wghERFJPws/DNNP9esuCndcBh0toVapIC/Cd9+9gItOmNa977FXd3HBTU+yqzF9Ow/IvpIR1CwAmvrY3wTkJ+H+IiIiuW3MnF7J1r8PGx4Prz6DYGZ89JQZ3HjhQkoK8gBoaOvkQzc/w2+e3BBy7SSDqd3ZD40QEhGRtBQbhl7oJ+lh5xp44Ovh1gmIRIyr33Yonzljdve+l7fUcf4NT7B5T3OINZMDodZOCim3kYiIJM2xH4UZS4INB7d9DFrrw6zRoJxx2DhuuXQx4yp9T9OuqONLty/jmr8tp7NLfy+KiIiIZL3qKXDG1+Lbj/8INj4VXn0CZsYnTj+Eb7xzPpFgdNHanU286yePs0ZpkzJCsoKaF5nZvxML8EGA3vuDcn+SPjfdKbeRiIgkRyQC5/wYiqv89t6NcM/nw63TIB0+sYo7PnEiR0yq6t538+PrufjXz9LQ2hFizSRDqd0pIiKSaRZeBDNOCzZcWgxDj3nfcVO4/n1HU5jnQ2S19W2cf8MTLN2wO+Sayf4kK6g5DVjSq0wDrI/9sZILlNtIRESSp2oinH1dfPv538Kqu8KrzwGoqSzmT5cs5i3zx3Xve3D1Ds77yRNs2q0hPnJApqF25z40QkhERNKaGbz9h1BY4bd3vQr//trA1wyjN88fz80fOpayQp82qa6lg//42VM8sGp7yDWTgSQj99Bp+z8lNznnGoAGADNTbiMRERm6+efB6n/Asr/67b99CiYdC+Vjw63XIJQU5vGjC47mutFr+NEDrwKwuraBd/74MW58/zEsnDoi5BpKBlC7s39XAlfHNjRCSERE0k71ZDjz6/D3T/ntJ66HuW+FqYvDrVfghFmj+eMli7nol0+zq6md1o4oH/31s3zn/CN451GTwq6e9GHIQU3n3EPJqIiIiIgM0lv+DzY8AQ1boXmnD2xe8Ae6pxpPY5GI8Zkz5zB9dBmfu/UlOrocOxvbueCnT/Kd847gnAUTw66ipDG1Owd0LXBTsH53TU3N/DArIyIi0qejPwAr7oDX7qd7GPqlj0Fhadg1A2D+pCr+8vETeP/Pn2LznhY6o45P/+lFdjd18JGTpoddPekl6V0HzWyRmX3GzL5nZjeY2f+Y2TvMrDjZnyUiIpKTSkfCO66Pb6/5Jzz/m/DqcxDetXASv7v4eEaUFgDQ3hnlij++wHf/tQbnXMi1k0yhdmecZj8XEZGMYAZv/wEUVfrt3Wvh/q+GW6depo8u468fP4E5NRXd+/7nzhV8555VaqemmaS1dszsWDN7DngC+DbwKeAS4AvAX4FNZvahPq5LxhB4ERGR3DLzdFj0sfj2Pz/nG4UZZNH0kdx++YnMGlveve/797/Cp/74Aq0dXSHWTNKd2p0iIiIZrGoSnPmN+PZTN8D6x8KrTx9qKov588cWc0xCeqTrH3iNz9/6Mp1dyludLpIS1DSzs4FHgAXANuAP+Abm/wbr24BRwM/M7BsJ150B/Gcy6pCOlLBdRERS6o3XwOjZfr2jCW67FKKZFQycOsq/CT/5kNHd+/7+4lYu+OmT7GhoC7Fmkq7U7hQREckCR10Is94UbATD0NubQq1Sb1WlBfzmI8dx+tx47vo/PrOJy3//nF7Ap4khBzXNrAbfgIwCHwemOucudM593jn3OefchcAU4GNAK/DfZnaamb0duANIj8QJqXElsCUo85WwXUREkqqwFN55I0SCzmebnoIVt4dapYNRVVLALy86lgu
Pn9K97/mNe3nH9Y+x6vX6EGsm6UbtThERkSzRPQy9ym/vWZ92w9DBT3R54/sXcu5R8bzv9yyv5aJfPk1Da0eINRNITk/NTwPlwH845250zu0TrnbORZ1zPwX+AzDgN8AtwG7gtiTUIV1dC0wMyss1NTUhV0dERLLOxKPhhE/Gt5/+WXh1GYL8vAj/c87hXPO2Q4kE8x1t2dvCu378OP9epZeC0k3tzn5ohJCIiGScygnw5m/Ft5++CXasDq8+/SjIi/B/5x/JxQkTBT25djfvvUkji8KWjKDmWcBTzrn9NhKdc7cDTwETgJXAcc65l5NQh7SkhO0iIjIsFn0s3ltz4+Pw+rJw63OQzIyLTpzOzz94LOVF/vs0tXdx8a+e5RePrlNidgG1OweiEUIiIpJ5jrwApp/q110U7vtKuPXpRyRifPHsefz3WXO79y3fWs/5NzzOpt3NIdYstyUjyjYdePwAzn8ccMCJzrnNSfh8ERGR3FY5Hua9Lb79zE/Dq0sSnDZ3LH/5+GImVpcAEHXw1TtXcNXty+hQYvZcp3Zn/zRCSEREMo8ZvClh2Pnqu2DDE+HVZwBmxseXzOTb75rfPbJo/a5mzv3J42xqUBs1DMkIahYA7QdwfgfQ5pxLrwywIiIimWzRJfH1l/4MLXtDq0oyzB1Xye2Xn8hRU6q79/3uqY18+OZnqGtR/qIcpnZnPzRCSEREMtaEBTD/3fHtf30J0niEznuOncJPLlxIYb7/u3ZHQxvfeKqFNXs0edBwS0ZrZxsw/wDOPyy4RkRERJJlymIYe5hf72iGF34fbn2SYExFEX/46PG8/cgJ3fseeWUn5/74MTbsyvoYlfRN7U4REZFsdPpVkFfo1zc/Ayv/Hm599uPMw8bx6w8voiJImdTSCd95ppX7Vij9y3BKRlDzYeBNZjZ3fyea2TzgTOChJHyuiIiIxJjBoovj28/8DLJgopDigjy+/94FfPqNs7v3vbajiXdc/xhPr9sdYs0kJGp3ioiIZKMRU3uOPLrvGuhK79E5x88YxR8/djyjy4sA6IjCx367lL8szfaMN+kjGUHNH+GHAt1pZof2d1LQsPw7kAdcn4TPTXuahVJERIbV/HdDUZVf3/0arH843PokiZlxxRsP4QcXHNU9zGdPcwf/8bMn1WjMPWp3ioiIZKuTr+zZll1zd7j1GYTDJlTx148vZkyJT7LZFXV85pYXuenh10KuWW4YclDTObcU+A4wA3jOzH5vZh8xszPM7E3B+h+A54NzvhNckws0C6WIiAyfonKY/6749uZnwqtLCrz9yAn88ZLjGV3uhyZ1dPlG4//evYpoNH3zLknyqN0pIiKSxUpHwsIPxLc3PRVeXQ7A1FFlfPG4YiZXxENs3/jHKr75z5W4NM4Nmg3yk3ET59x/m1kTcBXwXuA9vU4xoBO4xjn31d7XZ7FrgZuC9btramoOJAeUiIjIgRub0Hltz/rQqpEqR08Zwe2Xn8jFv3qWVa83APDjB19j7Y4mvvueBZQU5oVcQ0k1tTtFRESy2KRF8fUtz4dXjwNUXRzhc4uK+dVrxTy93qdIuvGhtexubOeb584nP08T+KVC0n7VoNF4CPA/wAPAKmA18GCw75Bca1hqFkoRERl2I6bH1/dsCK8eKTRpRCm3XLqY0+aM6d539/LXefeNT1Bb3xpizWS4qN25L6U9EhGRrDDx6Pj6thcgmjkzipcVGL/+yCLeOK+me98tSzdz6W+fo7Ujc75HJklKT80Y59wG4Opk3lNEREQOwIhp8fUsDWoCVBQX8LMPHsvX71rJLx5bB8DLW+o450eP8bMPHhNy7WQ4qN25jytJ+D2U9khERDJS5UQor4HGWmhvhJ2vwNj9zg+YNooL8rjhwqP53K0vd+d+v29lLRf+7Cnefcxkpo4qZdroMsZWFGFmIdc28yU1qCkiIiIhq56MH33roH4zdLZDfmHYtUqJvIjx5bcdyowxZVz9t+V0RR2v17dy/g1P8KHD8pleGWH16w00t3fS3N4VFL/e1NZJS3sXTe1dtLR3BssumrrP9cuW9i4mjyjl+xcsYHxVSdhfWWQgSnskIiKZzwwmHA1r/um3tz6XUUFNgPy8CN857whGlRdy40NrAXh2wx6e3bCn+5ySgjymjir1Qc5RZUwdVca0UaVMGVXK+KoS8iIKeA6GgpoiIiLZJL/Iv+Gu3wwuCnWbYNTMsGuVUhceP5Vpo8r4+O+W0tDaSUtHFz9+IRji8/DQZ4DfVtfKl25fxs8+eOyQ7yWSKs65BqABwMyU9khERDLXxISg5palsOB94dbnIJgZn3/zPEaVFfKNf6za53hLRxerXm/ozhGfqDAvwsyx5Xz+zXM5ZfaYfY5LnFo7IiIi2abHEPT1YdViWJ10yGhuu+xEpowsTcn971u5ncde3ZmSe4uIiIhIggkJeTW3PBdePZLgklNmcttlJ/Cp02fx9iMncOSkKiqLB+5f2N4VZeW2ev7nzhXDVMvMpZ6aIiIi2WbEVNjwqF/PkaAmwKyx5dx++Yl85e/LeWDFNgoiMKKyjLLCPEoK8ygrzO+5LMqjtDCf0sK8oOT3WJYV5fGjf7/K7S9sBeB/7lzBXZ86WcOBRERERFJpwlHx9dplGZ9O6agpIzhqyoge+/Y2t7N+VzMbdjWxfmew3NXE+l3N7G5qB2B7Q1sY1c0oCmqKiIhkmxzsqRkzsqyQ77/3KB58sA6AJUtOHdL9PvfmedyzvLZ7iNBflm7iPcdOSUZVRURERKQvZaN8e3bPeuhq94HNxFnRs0B1aSELSgtZMLm6x/6mtk4Ou/oeANo7oyHULLNo+LmIiEi2yeGgZrKNqyrmY6fO6N7+zj1raGzrDLFGIiIiIjkgcQj61swegn4gCvLiYbr2LgU190dBzRQyswozm2BmE4CCaFT/QYqIyDBQUDOpLjllBuMqiwHY2djGDQ++FnKNRERERLJcYs/MLc+HV49hVpAXT3PUFXV0RV2ItUl/Cmqm1pXAlqDMr62tDbk6IiKSExKDmns3hFaNbFFamM9nz5zTvf3TR9ayZW9LiDUSERERyXI52lPTzCjMj4fqOtRbc0AKaqbWtcDEoLxcU1MTcnVERCQnlI2BgmAW8NY6aNkTbn2ywDuPmsgRk6oAaOuM8u1/rgq5RiIiIiJZbPyRYEHIascqaGsMtz7DqChhCHqb8moOSEHNFHLONTjntjrntgIdkYh+bhERGQZmUD01vq0h6EMWiRhXnX1o9/bfXtzKcxsVLJb0obRHIiKSVYrKYXQwUsZFYduL4dZnGCX21NRkQQNTlE1ERCQbKa9m0i2aPpI3Hz6ue/trd67AOeU5krShtEciIpJdJi6Mr+fQEPQeQU0NPx+QgpoiIiLZSEHNlPjcm+dSGAwJem7jXu58aVvINRLpprRHIiKSXSYeFV/fkjtBzR4zoKun5oAU1BQREclGCmqmxNRRZVx04rTu7W/9cxWtHV3hVUgkoLRHIiKSdXJ0siANPx88tXZERESyUWJQc+NTsPOV0KqSbS4/bRYjywoB2LK3hV88ti
7kGomIiIhkoZrDIc+3udizHpp3h1qd4VKYp9nPB0tBTRERkWw0ckZ8fcdK+NEx8Nvz4NX7QXkgh6SqpIBPv/GQ7u0fP/AaOxraQqyRiIiISBbKL/SBzZgc6a2Z2FNTs58PTEFNERGRbDT6EJh9Vs99r/4Lfnsu/PR0aNwRTr2yxAWLpjBrbDkAjW2dXPevNSHXSERERCQLJU4WtOzWnHg5r+Hng6egpoiISDYygwv+CB/4G8x+M2DxY1ufgzsuC61q2SA/L8IXz57Xvf2nZzay6vX6EGskIiIikoWmnxxff+F3cM8Xsj6wWaTZzwdNQU0REZFsZQYzToX3/RE+uRQWfih+7JV7Yffa8OqWBU6bM5ZTZo8BIOrga3euxGV5I1tERERkWM19G8x7e3z7yR/7dmwW0+zng6egpoiISC4YNRPe9j2omR/fV7cltOpkiy++ZR6RoBPso6/u5IHV28OtkIiIiEg2iUTgvF/AIWfE961/NLz6DINCBTUHTUFNERGRXFI9Jb7ekhszSKbSnHEVXLAo/pt+/a6VmqVSREREJJnyCmBawjB0l91trcScmmpXDkxBTRERkVxSOiK+3qygZjJ8+k2zqSjKB+C1HU38/qmNIddIREREJMtYQvgqy9P9aKKgwVNQU0REJJeUjIyvq6dmUowuL+Ly02d1b3/vvjXUNXeEWCMRERGRLBPJi6/nUE/NNvXUHJCCmilkZhVmNsHMJgAF0aj+YxQRkZCVJgQ11VMzaS46YRqTRpQAsKe5gx/++5WQayQiIiKSRXr01OwKrx7DQDk1B09BzdS6EtgSlPm1tbUhV0dERHJej56ae8KrR5YpLsjj82+e1739qyfWs35nU4g1EhEREckiPYKa2R3o0/DzwVNQM7WuBSYG5eWampqQqyMiIjlPPTVT5i3zx3HMVJ+ztKPL8c1/rgy5RiIiIiJZwiy+nu1BTfXUHDQFNVPIOdfgnNvqnNsKdEQi+rlFRCRkyqmZMmbGl956aPf2PctreXLtrhBrJLlEaY9ERCSr5WpPza7sHmo/VIqyiYiI5BL11EypIydX886jJnZvf+2uFUSj2T1Dp6QNpT0SEZHsZQkTBUWzO9CXGNTs6FI7ciAKaoqIiOQS9dRMuc+eOYfiAt/EWralnluf3xJyjSRHKO2RiIhkrx49NbM70Kfh54OnoKaIiEguKRkRX2/ZAxqimnQTqku45OQZ3dvfuWcVze2dIdZIcoHSHomISFbLoeHnBQk9NdsU1ByQWjsiIiK5JL8QCiv8uotCW1249clSHzt1JmMrigCorW/jxofWhlwjERERkQyWQ0HNIvXUHDQFNUVERHJNaUJvTeXVTImyonw+c+ac7u0bH36NbXUtIdZIREREJIP1CGrmTk7N9i4FNQeioKaIiEiuKdFkQcPhXUdP4tDxlQC0dkT5zj2rQ66RiIiISIaKJEwUlOU9NXsENTuzO4A7VApqioiI5JpSTRY0HPIixlVvnde9fetzW3hp897wKiQiIiKSqczi61ke1IxNOAmwp7kjxJqkPwU1RUREco16ag6bE2aO5k2Hxmeh/tqdK3FZPmOniIiISNIlDj+PZnfvxbnjKrvXn9+4h8Y2TTjZHwU1RUREco16ag6rL7xlHvkR37vg6fW7uXvZ6yHXSERERCTD5NBEQROqS5gXpDDq6HI8uHp7yDVKXwpqioiI5JoePTV3hVePHDF9dBkfWDyte/ub/1xFm/IjiYiIiAyeJebUzP5RL2+cN7Z7/b//8pJeivdDQU0REZFcUzUpvr57bXj1yCFXvOEQqksLANi4u5lfPb4+3AqJiIiIZJIc6qkJ8O5jJlNelA9AU3sXl/52Kd++exVd0ewP6B4IBTVFRERyzZg58fUda8KrRw6pKi3gijcc0r39w/tfZVdjW4g1EhEREckgibOfd7aEV49hMnlkKbdcupgpI0u79/3kwde46JdPs7upPcSapRcFNUVERHLN6Nnx9V2vQJeSjw+HC4+fyowxZQA0tHXyvfteCblGIiIiIhli5Iz4+usv58QQ9HnjK/n7J07itDljuvc98spO3vbDR1m2pS7EmqUPBTVFRERyTUk1VIz3613tsGd9mLXJGQV5Eb74lnnd279/eiOv1DaEWCMRERGRDDFyJhRX+fXmXbB3Q7j1GSZVpQX8/IPH9hjxs2VvC+/6yeP8ZenmEGuWHhTUFBERyUU9hqCvCq8eOeb0uWM5cdYoALqijq//Y2XINRIRERHJAJEITDg6vr352fDqMswiEePTb5rNzz5wDBXFPs9mW2eUz9zyIlfd/jLtndmfY7Q/CmqKiIjkojFz4+s7V4dXjxxjZlx19qGY+e0HV+/goTU7wq2UiIiISCaYdEx8fctz4dUjJG88tIa/feIkZteUd+/77ZMbee9NT1Bb3xpizcKjoGYfzOxyM3vJzOqD8oSZnR12vURERJImMa/mDgU1h9O88ZW855jJ3dtfv2sFnV25+4ZdREREZFAmLoyvb8mdnpqJpo8u47bLTuStR4zv3vfcxr2c/YNHeXrd7hBrFg4FNfu2Gfhv4GjgGODfwO1mdkSotRIREUmWxJ6aGn4+7P7fGbMpK/SzeK6pbeSPz2wKuUYiIiIiaS4xqLntRejqCK8uISoryueHFxzFVWfPIy/ih//sbGzjfT99kl8+tg6XA5MoxSio2Qfn3B3OuX865151zq1xzn0RaAAWh103ERGRpOgR1FwDUfUUHE5jK4q57LRZ3dvf/dca6ltzs2GeyzQ6SERE5ACUj4WqKX69sxVql4dbnxCZGRefPIPffGQRo8oKAeiMOr7y9xV8+k8v0NLeFXINh0dGBjXN7Dwz+6GZPRI0AJ2Z/XY/10wys1+Y2VYzazOz9Wb2PTMbsZ/r8szsvUA58Hgyv4eIiEhoykZB6Wi/3tkCdeopONw+ctJ0JlaXALCrqZ3rH3g15BpJCDQ6SERE5EBMShyCvjS8eqSJE2aO5u+fPIkjJ1V177v9ha2c+5PH2birOcSaDY+MDGoCVwGfABYAW/Z3spnNBJYCHwKeBr4LrAWuAJ4ws1F9XDPfzBqBNuAG4J3OuZeT9QVERERC16O3pvJqDrfigjz+66z4LPS/fHQ9m3Znf+NT4jQ6SERE5ABNVFCztwnVJfzpY4u5YFE8Z/vKbfW89YeP8MDq7SHWLPUyNaj5aWA2UAl8fBDn/xgYC3zKOfcO59znnHOn44Obc4Cv93HNanzQ9HjgJ8CvzOzwJNRdREQkPYxJnCxIeTXD8PYjJ3DUlGoA2ruifOufeg7pRKODRERE0szExBnQFdSMKS7I45vnHsE3z51PYZ4P9dW3dvLhm5/hB/e/QjSanXk2MzKo6Zx7wDn3ihtE9lMzmwGcAawHru91+GqgCXi/mZX1+oz24K35s865zwMv4IOpIiIi2UE9NUNnZlx19qHd23e9vI1n1+fezJVpTKODRERE0sn4I8H8ZIvsWA2t9eHWJ81csGgKf750MeOrigFwDq771xou+c2zWZm/PT/sCgyD04Plvc65HrMgOOcazOwxfNDze
OD+Ae4TAYr292Fm1t+rgrkNDQ08+OCD+6/xQWpoaABI6WdI6uk5Zj49w8yXK8+wek8rC4L1urXP8HwWfd9Me4bHjcvjqdd9QvfP/uEpvnR8MRGz0OoT+/2ET+PzXr4KnAo8sJ/zE0cH/TC208yuC+71deDSXtfERgdVA+/Cjw5a4pxbloT6i4iIZJfCUqg5FF5/GXCw9XmYcWrYtUorCyZX8/dPnsQnfv8cT671L8vvW7mdc370GDdcuJA54ypCrmHyZGRPzQMUS1a1pp/jrwTL7jF4ZvYtMzvZzKYFb8+/CSwBfpe6aoqIiAyv5tJ43p2ypk3+Va6E4vw5heQHrbJ1dVGe3JYbM1amO40OEhERSUM98mo+G1490tjo8iJ++5Hj+OjJ07v3rdvZxDt//Bh3vrQ1xJolVy701IxNAVXXz/HY/uqEfeOA3wbLOuAl4M3OuXv292HOuYV97TezpRUVFUcvWbJkEFU+OLHeKKn8DEk9PcfMp2eY+XLmGToHz/8ntNaR39XMkoVzoHJC2LVKikx8hmttFT9+8DUA/r7B+PR5J1NSmBdKXSoqsucN/jAa1tFBMPwjhDKtB7T0Tc8xe+hZZgc9xwM3rrGCWBKlHS/ew/KuPsMwwypdn+OJZZB/ZBE/X9ZGexc0t3fxid8/z52PL+P82QXkRcIbGRQzlBFCudBTc39iT7D7Dbxz7iLn3FTnXJFzbqxz7o2DCWiKiIhkFDMYHZ99W5MFheuy02YxurwQgG11rfzskbUh10gOkEYHiYiIDIOGikO61yvr12i00X4cNz6fLx9fQk1pPIB59/oO/u/ZVurbMvu3y4WemrGemFX9HK/sdV7SmFkFEOvqUBCNRgc6XUREZPiNmQObn/brO9bAzNMHPl9SprwonyvPmMPnb/VzxPzkodd497GTqaksDrlmMkjDOjoIhn+EUCb2gJZ96TlmDz3L7KDneBCiXfDSF6C9kaL2PSw5ejZUTQy1SpnwHN/2xg7+359e4P5V2wFYuTvKN5+L8pMLF7JgcnVo9RrKCKFc6KkZm851dj/HYyH+/t6qD8WV+JkytwDza2trU/ARIiIiQ9BjBnT11Azbu4+ZzNwgeXtzexffu++V/VwhGUSjg0RERJIhkgcTjopvK6/moFSVFPDTDxzDp984m9h8lNvqWnnvTU/w6vbMnCQyF4KasVkqzzCzHt836El5ItACPJmCz74WmBiUl2tqalLwESIiIkPQI6i5uv/zZFjkRYwvnj2ve/vvL26lrVOTBmWI0EYHgW/XmtkEM5uARgiJiEi26zFZUH8ppqW3SMS44o2H8IsPHktlsR+83doR5et3rQy5Zgcn64OazrnXgHuBacDlvQ5/BSgDfu2ca0rBZzc457Y657YCHZFI1v/cIiKSacYkDGRQT820cNKs0UweWQJAY1snT7y2K+QaySCFOToINEJIRERySWJQc7OCmgfqtLlj+f1Hj+/usfnA6h08vGZHuJU6CBkZZTOzd5jZzWZ2M/C5YPfi2D4z+79el1wGbAd+YGa3m9k3zezfwKfxDcsvpqieemMuIiLprXISFJT59Zbd0LQz3PoIZsaZh47r3r5nuYJTGSLM0UGgEUIiIpJLJh0TX9/6vM+zKQfk8IlVvOeYyd3bX7trBZ1dmRW3ysigJrAA+GBQzgz2zUjYd17iyUFvzWOAm4Hj8G+yZwI/ABY751LVBUJvzEVEJL1FIuqtmYbOPDwe1PzXilq6opk9M2UuCHN0UPD5GiEkIiK5o3ICVIz36x1NasMepP93xmxKC/MAWFPbyJ+f3RxyjQ5MRrZ2nHPXOOdsgDKtj2s2Oec+5Jwb75wrDJKyX+Gc253CquqNuYiIpD9NFpR2jp4ygtHlhQDsbGzj+Y17Qq5RbsqU0UEiIiI5SXk1h2xsRTGXLZnZvX3dv1bT0NoRYo0OTEYGNTOF3piLiEhGGDMnvq7JgtJCXsR406Hxl6H3rtBoj5AsIDNGByntkYiI5J4eeTU1A/rBuvjkGUyoKgZgZ2M7v35iQ8g1GjxF2URERHLdaAU109Eph4zpXn+ltiHEmuSuDBodBEp7JCIiuSYxr+aW58KrR4YrLsjj46fN6t5esa0+xNocGAU1U0hvzEVEJCOop2ZaGlVe1L3e0NoZYk0kQyjtkYiI5JbxC4Bg+u7ty6E9JWmrc8KkESXd65nU7lRQM7X0xlxERNLfiGmQFwTQGl+HFuVvTAeVJfnd6/UZlNtIwqG0RyIiknOKK+O54V0Utr0Ybn0yWEVRvN2pnJoSozfmIiKS/iJ5MDphBvTtmiwoHVQWF3Sv17dkzhtzERERkWGjvJpJUZHQ7mxUT00BvTEXEZEMMnZefH3HyvDqId0qSxKCmhn0xlzCobRHIiKSkyYlBDU3PRVePTJcRXFiT00FNUVERCSTJAY1tyuomQ7KCvOIBGmimtu76OhSkEoGpLRHIiKSe6aeGF9f/whEu8KrSwYrL9bwcxEREclUYw+NryuomRbMLGOHAkkolPZIRERyz+jZUDHer7fWwdYXQq1OpiovzMeCl+lN7V10RV24FRokBTVTSMOAREQkYyT21KxdDi4zGjLZTpMFyWAp7ZGIiOQkM5ixJL699oHQqpLJIhGjvDDe7mxsy4yX6WrtpJaGAYmISGaomgwFpX69ZbdmQE8TmixIREREZD9mnBZfX/tgaNXIdJk4BF1BzdTSMCAREckMkQgUVca3O1vDq4t06xHUzJDGpYiIiMiwmnFqfH3TU9DeFF5dMlgmThakoGYKaRiQiIhklLx4AI1oZjRksl2P4ectCmpK/5T2SEREclbFOBgTpFLqaofNz4ZbnwzVI5e7hp+LiIhIRonEA2h0KYCWDtRTUw6A0h6JiEjuGj0rvq40SgelQsPPRUREJGOpp2baqSxRTk0ZNKU9EhGR3JVXGF/Xy/mDUl6UecPP8/d/ioiIiOQE9dRMO+qpKYPlnGsAGgDMTGmPREQkt/QIaraHV48Mljj8XEFNwcwqgIpgU7mNREQkvSUGNaMKoKUD5dQUERERGYTEEUcKah6USk0UJL0ot5GIiGSOHo3BzGjIZLuePTX1TERERET6pOHnQ9Zz+Hlm/IYKaqaWchuJiEjmiCTm1MyMhky265lTU89EREREpE95RfF19dQ8KBUZ2FNTw89TSLmNREQko/ToqakAWjpIHAaknJoyEKU9EhGRnKbh50OWmFOzsS0zgpqKsomIiIinnJppR7OfywFQ2iMREcldGn4+ZOXFGn4uIiIimUo5NdNOj6BmhjQuJTRKeyQiIrlLs58PWUWPEUKZ8W8BDT8XERERTzk1006P4efKqSkDUNojERHJaRp+PmSJE1Q2ZkhQU60dERER8fIS3nVq2E5aKCvMJ2J+vam9i84u5UkUERER2YeGnw9Zj4mC2jLjN1RQU0RERLwePTUz4+1stotErEfS9kyZiVJERERkWKmn5pCVF2Xe7OcKaqaQmVWY2QQzm4BmoRQRkXSn2c/TUmWJZkAXERERGZByag5ZRa/h5865EGszOApqppZmoRQRkcyh
2c/TUmJ+I82ALiIiItIHDT8fssL8CEX5PkzYGXW0dqR/xzwFNVNLs1CKiEjmyNPw83TUI6ipnpoiIiIi+9Lw86TokVczA9qdmv08hTQLpYiIZJTEnppdCmqmix7DzzUDuvTDzCqAimBTaY9ERCS3aPh5UlQUF7Cz0f9+9a2djK0MuUL7oSibiIiIeD0mClLwLF2op6YMktIeiYhI7tLw86RI7KnZ2Jb+nRwU1BQREREvL7GnphqD6aKyRDk1ZVCU9khERHJXj+HnbeHVI8P1nAE9/f89oOHnIiIi4kWUUzMdqaemDIbSHomISE5TT82k6JlTM/3/PaDWjoiIiHg93nCrMZguEhuXyqkpIiIi0gfl1EyKioSX6Y0KaoqIiEjGSJwoSDk100aP4ecZ0LgUERERGXaa/TwpEoefZ8IIIQU1RURExFNPzbRUqZ6aIiIiIgPT8POkqNTwcxEREclIyqmZlnr21FQjXURERGQfGn6eFInDzzMhqKmJglLIzCqAimCzIBqNhlkdERGRgamnZlrqMVGQZj8XERER2ZeGnydFYi73xrb0//eAemqm1pXAlqDMr62tDbk6IiIiA1BOzbRUWZJZuY1EREREhp2GnydFuYafS4JrgYlBebmmpibk6oiIiAygxxvu9G/E5Ioew8+VU1NERERkXxp+nhQafi7dnHMNQAOAmXVEIoohi4hIGuuRU1PBs3RRXpiPGTgHTe1ddHZFyc9Tm0J6UtojERHJaUqjlBSJw88b2tI/qKkWsYiIiHh5Ce861RhMG5GIUVGUWUOBJBRKeyQiIrlLPTWTomebM/3/PaCgpoiIiHg9cmoqcJZONAO6DILSHomISO5SUDMpNPxcREREMlOP4efp34jJJX4G9BZAM6BL35T2SEREclokDzDAgYtCtCvYJweix/DzDHiRrtaOiIiIeMpFlLY0A7qIiIjIAMwgvyi+rd6aB6W0MI+I+fXWjigdXemdo1tBTREREfF6DD9X4CydVBZrBnQRERGRAWkI+pCZGeUJeTUb03wIuoKaIiIi4vXoqZneDZhco5yaIiIiIvuhUUdJkUl5NRXUFBEREa9HTk01BNNJz56a6d24FBEREQmFemomRY+8mm3p/W8CBTVFRETEy0sYfq6322lFOTVFRERE9qNHT00FNQ9Wz8mC0vtluoKaIiIi4mn287SlnJoiIiIi+9Gjp6baSwdLw89FREQk8ygPUdpKzKmZ7o1LERERkVBo+HlSJPbUbNTwcxEREckIyqmZtiqLNfxcREREZEAafp4UibOfp/vLdAU1RURExOuRUzO9GzC5psfs55ooSERERGRfGn6eFJk0/Dx//6fIwTKzCqAi2CyIRqNhVkdERGRg6qmZtnrk1FRPTREREZF9afh5UlRk0Agh9dRMrSuBLUGZX1tbG3J1REREBqCcmmmrx+znmihIREREZF8afp4UPXJqpnlPTQU1U+taYGJQXq6pqQm5OiIiIgOIJAzgUE/NtNJj+HmaNy4lHGZWYWYTzGwCGiEkIiK5SMPPkyIxqKnh5znMOdcANACYWUckohiyiIiksYhyaqar8sJ8zMA5aGzrpLMrSn6e2hXSw5XA1bENjRASEZGco+HnSVFRlJhTM72DwwpqioiIiFdYBh++xwc3E4fvSOgiEaOiKL+7l2ZjWyfVpYX7uUpyzLXATcH63TU1NfPDrIyIiMiw0/DzpChPHH7elt4dHRTUFBERES+SB1OOD7sW0o/KkoLuoGZ9i4Ka0pNGCImISM4rGwvVU3yPzfySsGuTsTT8XERERESSqqK4AGgB0n8mShEREZFh95b/9UWGpLI4cfh5egc19QpXREREJANUFmsGdBERERFJrcSemun+Il1BTREREZEM0HMG9PRuYIqIiIhIZior6plT0zkXYm0GpqCmiIiISAZIHApU35LeQ4FEREREJDMV5EUoKcgDwDloau8KuUb9U1BTREREJANUlmTOUCARERERyVw9JwtK33angpoiIiIiGaBnT830bVyKiIiISGYrTwhqNqbxZEEKaoqIiIhkgJ45NdO3cSkiIiIima2iODPanQpqioiIiGQAzX4uIiIiIsOhUsPPRURERCRZNPu5iIiIiAyH8qLEoKZ6aoqIiIjIEGj2cxEREREZDokTBTW2pW+7U0FNERERkQyg2c9FREREZDgk5tTU8HMRERERGRLNfi4iIiIiw6GiWMPPRURERCRJNPu5iIiIiAwH5dTMYGb2eTN7xszqzWyHmf3dzA4Pu14iIiKSuyqK8jHz641tnXR2RcOtkIiIiIhkpcoew88V1Mw0S4AfAycApwOdwH1mNjLMSomIiEjuikSsx1vzdE7aLiIiIiKZq+fw8/RNe5S//1Nyj3PuzMRtM3s/UAecCPw9lEqJiIhIzqssLuh+W17f0kl1aWHINRIRERGRbFOu2c9Tx8zOM7MfmtkjwRBxZ2a/3c81k8zsF2a21czazGy9mX3PzEYM4iMr8L/VnqR8AREREZGD0DOvZvq+NZfBU9ojERERSTcVGn6eUlcBnwAWAFv2d7KZzQSWAh8Cnga+C6wFrgCeMLNR+7nF94EXgCcOusYiIiIiQ1SZ8NZcM6BnjSUo7ZGIiIikEQ0/T61PA5uBV4FTgQf2c/6PgbHAp5xzP4ztNLPrgnt9Hbi0rwuDc04CTnLOdQ296iIiIiIH58oz5tDS0UVlcT6zxpaHXR1JAqU9EhERkXQzoaqEG9+/kIrifEakcbqjjAxqOue6g5gWmwa0H2Y2AzgDWA9c3+vw1cAlwPvN7ErnXFOva78LvBc4zTm3dug1FxERETl4i6ar895wM7Pz8C/RFwBH4tMS/c45d+EA10wCvgqcBYwCtgG3A19xzu0vnZHSHomIiEioSgrzOPOwcWFXY78yMqh5gE4Plvc656KJB5xzDWb2GD7oeTxwf+yYmX0fH9Bc4pxbNdgPM7Ol/Rya29DQwIMPPnggdT8gDQ0NACn9DEk9PcfMp2eY+fQMM5+e4dDEfj8BfNqjI4FG/EihuQOdHKQ9ehw/SugOYBWwCJ/26CwzO9E5t2uAWyjtkYiIiMggZGpOzQMxJ1iu6ef4K8FydmyHmV2Pz795AbDHzMYFReO8RERERHLLp/HtxErg44M4PzHt0Tucc59zzp2Oz+k+B5/2qE8JaY/epbRHIiIiIgPLhZ6aVcGyrp/jsf3VCfsuC5b39zyVrwDXDPRhzrmFfe03s6UVFRVHL1myZKDLhyTWGyWVnyGpp+eY+fQMM5+eYebTMxyaioqKsKuQNpT2SERERCQ95UJQc39irVMX2+GcG7jFKiIiIiKyr6xOe6S0DtlBzzF76FlmBz3H7KDnePCGkvYoF4afx3piVvVzvLLXeUljZhVmNsHMJgAF0Wh0v9eIiIiISMZS2iMRERGRYZILPTVXB8vZ/Rw/JFj21/gciivxQ40AqK2tTcFHiIiIiEiayOq0R0rrkB30HLOHnmV20HPMDnqOB28oaY9yIagZy4N0hplFEocCmVkFcCLQAjyZgs++FrgpWL+7pqZmfgo+Q0R
EREQyg9IeiYiIiCRJ1g8/d869BtwLTAMu73X4K0AZ8OveydqT9NkNzrmtzrmtQEckkvU/t4iIiEguU9ojERERkWGSkT01zewdwDuCzXHBcrGZ3Rys73TOfSbhksuAx4EfmNkbgJXAccBp+GHnX0xxlUVEREQk+yntkYiIiMgwycigJrAA+GCvfTOCArAB6A5qOudeM7NjgK8CZwFvAbYBPwC+4pzbnYpKBsPbY8kB9MZcREREJLsp7ZGIiIjIMMnI8dDOuWucczZAmdbHNZuccx9yzo13zhU656Y6565IVUAzcCWwJSjz9cZcREREJHsp7ZGIiIjI8DHn3P7PkoPSq6fm8pKSkup58+al7PMaGhqAoc0cJeHTc8x8eoaZT88w8+kZDs3KlStpaWnZ7ZwbFXZdwtZH2qMzgbXAI8G+HmmPzGwmPu3RWOAO9k17dIJzbleK67yrpKRkZLLbnfpzlR30HLOHnmV20HPMDnqOB28o7U4FNYeJma3DJ4dfn8KPmRssV6XwMyT19Bwzn55h5tMzzHx6hkMzDah3zk0PuyJhM7NrSMhV2YcNvUcJmdlk4mmPRuHTHt1OCtMe9fr8VLU79ecqO+g5Zg89y+yg55gd9BwP3jQOst2poGYWMbOlAM65hWHXRQ6enmPm0zPMfHqGmU/PUCT59OcqO+g5Zg89y+yg55gd9BzDoWQ7IiIiIiIiIiIiklEU1BQREREREREREZGMoqCmiIiIiIiIiIiIZBQFNUVERERERERERCSjKKgpIiIiIiIiIiIiGUWzn4uIiIiIiIiIiEhGUU9NERERERERERERySgKaoqIiIiIiIiIiEhGUVBTREREREREREREMoqCmiIiIiIiIiIiIpJRFNQUERERERERERGRjKKgpoiIiIiIiIiIiGQUBTVFREREREREREQkoyioKSIiIiIiIiIiIhlFQc00Z2aTzOwXZrbVzNrMbL2Zfc/MRoRxHzlwQ/3tzWyUmV1sZreZ2atm1mJmdWb2qJl9xMz05zjFUvHnx8zeb2YuKBcns77St2Q+RzM72cz+ambbgnttM7N7zewtqai7eEn8O/Hs4HltDv6futbMbjGzxamqu0i6UlszO6i9mR3U5sweandmB7U9058558Kug/TDzGYCjwNjgTuAVcAi4DRgNXCic27XcN1HDlwyfnszuxT4CbANeADYCNQA5wJVwF+B853+MKdEKv78mNlk4GUgDygHPuqc+1ky6y09JfM5mtlVwP8AO4E78X82RwNHAQ845/4r6V9Akvl34reB/wJ2Abfjn+Ms4O1APvAB59xvU/AVRNKO2prZQe3N7KA2Z/ZQuzM7qO2ZIZxzKmlagHsAB3yy1/7rgv03DOd9VMJ5hsDpwNuASK/94/ANTge8K+zvmq0l2X9+AAPuA14DvhPc4+Kwv2e2lyT+//T84Px/ARV9HC8I+7tma0nS/0/HAV3A68DYXsdOC+6zNuzvqqIyXEVtzewoam9mR1GbM3uK2p3ZUdT2zIyinpppysxm4P8CWg/MdM5FE45V4N/QGP4PRlOq7yMHbjh+ezP7AvB14EfOuU8OudLSQyqeoZldAXwXWIL/B8TV6K15SiXx/6cR4FV8z5Vpzrkdqay3xCXxGR4HPAn8zTl3Th/H6/GjWCqS+w1E0o/amtlB7c3soDZn9lC7Mzuo7Zk5lBslfZ0eLO9N/AME4JxrAB4DSoHjh+k+cuCG47fvCJadQ7iH9C+pz9DM5gHfAr7vnHs4mRWVASXrOZ4ATAf+AewJcuP8t5ldoXw4KZesZ/gK0A4sMrPRiQfM7BSgAt+rRSQXqK2ZHdTezA5qc2YPtTuzg9qeGUJBzfQ1J1iu6ef4K8Fy9jDdRw5cSn97M8sHPhBs3n0w95D9StozDJ7Xb/BDuL4w9KrJAUjWczw2WNYCz+HzGn0L+B7wuJk9ZGZjhlBP6V9SnqFzbjfw3/heDyvM7CYz+6aZ/Rm4Fz+862NJqK9IJlBbMzuovZkd1ObMHmp3Zge1PTNEftgVkH5VBcu6fo7H9lcP033kwKX6t/8WcDjwD+fcPQd5DxlYMp/hl/EJvU9yzrUMsV5yYJL1HMcGy0uBdcAbgaeAqcC1wJnALfhhXpJcSfuz6Jz7npmtB34BfDTh0KvAzc657QdZR5FMo7ZmdlB7MzuozZk91O7MDmp7Zgj11MxcFiyHmhQ1WfeRA3fQv72ZfQq4Ej8D2/uTWSk5IIN6hma2CP+m/Frn3BMpr5UcqMH+WcxLOP8859z9zrlG59xy4J3AZuBUDQkKxaD/f2pm/wX8BbgZmAmUAQuBtcDvzOx/U1RHkUyjtmZ2UHszO6jNmT3U7swOanumCQU101cs8l/Vz/HKXuel+j5y4FLy25vZ5cD3gRXAaUGXdkmNIT/DhCFAa4AvJa9qcgCS9WdxT7Bc65x7MfFA0BMi1oNl0QHXUPYnKc/QzJYA38Yna/9/zrm1zrlm59xz+H8gbAGuDJLDi2Q7tTWzg9qb2UFtzuyhdmd2UNszQyiomb5WB8v+cjQcEiz7y/GQ7PvIgUv6b29m/wn8CFiGb2C+ftC1k8FIxjMsD66fB7SamYsV/CyUAD8N9n1vqBWWPiX7/6d7+zkea3yWDK5acgCS9QzfGiwf6H3AOdcMPI1vGx11oBUUyUBqa2YHtTezg9qc2UPtzuygtmeGUE7N9BX7j/4MM4skzrhlZhXAiUAL8OQw3UcOXFJ/ezP7b3xeoxeANznndia3utKHZDzDNuDn/Rw7Gv8X2KP4vzg1TCg1kvVn8WH8zK+HmFmhc6691/HDg+X6oVdZeknWMywKlv0l1o/t7/1sRbKR2prZQe3N7KA2Z/ZQuzM7qO2ZKZxzKmla8F3KHfDJXvuvC/bfkLCvAJgLzBzKfVTS9hl+KTj/WWBk2N8rl0qynmE/974muMfFYX/PbC9J/LP42+D8r/Xa/yYgin+bXh32983GkoxnCLw7OPd1YGKvY28OnmELMCrs76uiMhxFbc3sKGpvZkdRmzN7itqd2VHU9syMYsGPKWnIzGYCj+NnPrsDWAkcB5yG7+Z8gnNuV3DuNPysaBucc9MO9j6SXMl4hmb2QXxS4S7gh/Sdt2O9c+7mFH2NnJasP4f93Psa/HCgjzrnfpaC6ksgif8/HQs8BswCHsEPGZmKz4njgPc5525J/TfKPUn6/2kE30B9I9AA3IZvZM7DDw8y4D+dc98fli8lEjK1NbOD2pvZQW3O7KF2Z3ZQ2zNDhB1VVRm4AJOBXwLb8F2SN+CTdo/sdd40/P/Y1g/lPirp9wyJv1kdqDwY9vfM5pKsP4d93Df2bPXWPIOeIzAS/4Z2XXCfXfiGzvFhf8dsL8l4hvg36f+JHy5Ujx/atR24Ezgj7O+oojLcRW3N7Chqb2ZHUZsze4randlR1PZM/6KemiIiIiIiIiIiIpJRNPu5iIiIiIiIiIiIZBQFNUVERERERERERCSjKKgpIiIiIiIiIiIiGUVBTREREREREREREckoCmqKiIiIiIiIiIhIRlFQU0RERERERERERDKKgpoiIi
l8tFRkbGCX1uEZGapnxSUyM15UjW2iwgC8AYUxAQoLEL3qS4s2KKOxV3ioh/axwTjjFgLezKzCWvsIjQoJpby11JTX/zaEyVbxF1/FM849Hq/4X+5z//mWnTpjFx4kS6du3Kyy+/TEhICJ9++ikREREea+err77iq6++KncsPDycK664wh1AeUJhYSGPPfZYuWNXXXUVr7/+eqVqJx04cICrrroKgOeff57AwBP7x2j48OHlAkuAO+64g1deeYWff/6ZLVu2uJ9833rrrdxxxx28//773Hfffe7zx48fj7WWm2++udLtfvvtt2zYsIE//elP5QJLgKSkJO6//37uuecepk6dyjnnnON+Lzw8/PBbERAQQFxcXKXbFhGpicqtgO4HT82lliiOO6stdqwKxZ1VprhTcaeISEhQAI2jw0jNyMVa2JmeS0q8536neZoe4UqtYozh3//+N0lJSfzlL39h586dvPTSSx4N+ADee+89rLVYayksLGTz5s08+OCDPPvss/Tt25eDBw96pJ2wsDCstbhcLrZv386ECROYMmUKvXr1cj+xP5pDhw5x/vnns27dOu6//37+8Ic/nHD7hwd24NRdGjhwIAC//fab+/jVV19NZGSke8oPQEFBARMmTCAuLu6E2i+ZtlPyRP3w1/z58wFYvXo1AKeccgrdunXjk08+YcCAAfzzn/9kzpw55Ofnn/BnFhGpicrX1FRSU6QmUNxZSnGn4k4RqT3K19Ws2XGnRmpKrdOwYUMGDx7MxIkTadCggfuJsbcEBgaSnJzM3/72N9auXctHH33EK6+8wl/+8hePtWGMoUmTJlxzzTW0a9eOfv36cccdd/DNN99UeP6hQ4cYOXIkv/zyC3/84x955plnTqrdo9XjSkxMBCg3vSYqKooxY8bwxhtvMG3aNIYNG8ZXX33Frl27uOeee05oVc79+/cD8Nlnnx3zvJIgPjAwkJ9//pnHH3+cSZMm8cADD7j7dM011/CPf/yDyEitFCwi/qtJbOm/oTuU1BSpMRR3Ku5U3CkitU3TuHDmb3a2a/piQUpq+hsPTK0pWSExKsovJhOdsIkTJzJx4kTi4+PZt28fd911F2+99Va1tN2nTx8++ugj9xNdb+jbty+xsbFMnz69wvezsrIYOXIks2bN4v777z/pwBLgaCun7tq1C4CYmPLlEG699VbeeOMN3nzzTYYNG1auUPuJKLnvV199xfnnn1+pa+Li4njhhRd44YUXWL9+PTNmzODNN9/k1VdfJT093aM1p0REqlvZ6ec7M3JwuSwBAcdfpVmkSorjztoeO1aF4k7FnYo7RaS28afFgjT9XGqVDRs2cNNNN9GwYUMWL17M4MGDefvtt5k4cWK1tH/gwAHAKRLuLVlZWWRmZla4omRGRgZnnHEGs2bN4qGHHqpSYAkwY8aMI44VFRXxyy+/ANC9e/dy73Xp0oUBAwbwxRdfMG/ePKZMmcLgwYPp0KHDEfcpqbNUVFR0xHt9+/YFYNasWSfV79atW3P99dczY8YMIiMjj6hDJSLib+qFBBFXz1lRuKDIsvdgno97JCKKOxV3guJOEal9yk8/r9kjNZXUlFojPz+fSy+9lIMHD/L+++/TrFkzPv74Yxo0aMBNN93E+vXrvdr+gQMHeO+99wAYOnRoufceffRRjDEVrrhYkSVLlpCenn7E8fz8fO644w5cLhcjR448ov3TTjuNX3/9lccee8y9emVV/Pzzz0dMNXr11VfZsGEDw4YNcxdrL+vWW28lPz+f0aNHY63llltuqfDeDRo0AGDr1q1HvHfBBRfQqlUrXnvtNb777rsKr587dy7Z2c4/sJs2bWLlypVHnHPgwAHy8vIqLOQuIuJvytbV1BR0Ed9S3Km483CKO0Wktig7UnNbDR+pqennUmN9+eWXfPnll0DptJO5c+cyduxYAOLj4/m///s/9/n3338/ixYt4o9//CNnn302AE2aNGHChAmcd955XHbZZcyZM6dcG3/605/Yt28fgPsp8LPPPsuHH34IwKhRoxg1alSFfSspmF5UVMT27dv5+uuv2b9/P6eeeuoRAVXJE/SKnnJXZMKECYwfP56hQ4eSnJxMbGwsqamp/Pjjj+zatYt27dqV++wAF110EQsXLqRVq1a4XK4KA9lRo0bRrVu3SvUB4LzzzuPCCy/kwgsvpHXr1v/f3r3HSVHdeR///Aa8MDiCA6jxEhC8XyKiooLGERNhzSYPom5iHlHXxCSbvLxt2CjZPA/gmn2i5qJx1dwxMdlLTKKIMYkr4l2jC16CKFERA4pGQGUcLnL5PX+camyG7pme6aqurq7v+/WqV83U5dT59enu+c2pqlM8/fTT3HXXXbS2tnLjjTeW3OfMM8/k0ksv5dVXX2Xw4MFMmjSp5HYnn3wyt956K5MmTeLUU0+lX79+DB06lMmTJ7Pddtvxm9/8hvHjx/Oxj32MMWPGMHLkSJqbm1m6dClPPPEEixcvZvny5TQ3N/P0009z2mmnceSRR3LooYeyxx578OabbzJr1iw2bNiwZawjEZEs22NAPxa8uhoIT0Af9UE9YVckLknlndtvv/2WfZR3dk15p4hIfcjSlZpbnqSnKdkJmDdq1CjvzsKFC33hwoXdbleN1atX++rVqxM9RhymTZvmQNlp6NChW7a94447HPCjjjrK33vvvW3KuvTSSx3wiy66aKv4hw4d2uUxpk2btlU55557bsntWlpa/Oijj/arr77a165du83xJ06c6E1NTb5o0aKKYn/ooYf8/PPP94MPPtgHDhzoffr08V122cXHjh3r11xzjXd0dGyzT3exAD5z5kx37/49MHPmzC3bz54924899lhvbm72AQMG+KRJk7qN45JLLnHAp0yZUnabjRs3+tSpU32fffbxvn37OuAnnnjiVtu88cYbftlll/khhxzi/fr18/79+/u+++7rp59+ut9yyy2+YcMGd3dfunSpT5061ceMGeO77babb7/99r7nnnv6hAkT/K677trm2Fn4DCT5XTB37lyfO3duImVngeLPd/zu2X0Npt+xwIdedqcPvexO/959L/a6nEriHzVqlAPzvA5yKE3p551Z+LtZraTyzmJp5Z3dtV/SeWd3Gj3vrFbSn79a/P+Zd1nNOyTIY/u9t3GT73P5nVvyznUbNiZ6vGryTvOQ+EgRM5sKTAIOANYDjwFT3X1BFWXOGzVq1Kh58+Z1ud1zzz0HUHIsmLjkfbD3Wsfv7gwZMoRx48bxy1/+sibH7E7Sr0FbWxsPPPAAixYtYr/99kvkGNXIwmcgye+CwmD/nW9XywvFfx+Q3/ghu6/Bjx5czJW/Dd8N5xw3lCv+16G9KqeS+I888kjmz58/392P7NVBJFVx551Z+LspQam8s9Hbr97zzmol3X61+P8z77Kad0iQ1/Yb+417twx3NHdKG/sM7p/YsarJOzWmZmltwI3AGGAcsBG4x8xa06yUZNOCBQtYuXIlU6dOTbsqNfH4449z//33M378+IZMLEVE0rJn0RPQX9OYmiJSgvJOERGJw55bPQG
9fm9B15iaJbj7+OLfzWwy8A4wFpidSqUksw477DDycEX0TTfdxKuvvsrMmTNpampixowZaVdJRKSh7DGwOLlUp6aIbEt5p4iIxGGvXfrx+Mvh53rOOzPZqWlmZwAnAiOBw4EW4BfufnYX++wFXAFMAAYBy4HbgRnu/lY3h2whXNXa3XYiuXXVVVexbNkyhg8fzi233MLo0aPTrpKISEMpPmOuKzVFJM+Ud4qIJCsrDwvKZKcm8DVCZ+a7wDLgwK42NrMRwCPArsAs4HlgNHAxMMHMxrr7yi6KuA54Cni06pqLNKjCUzlFRCQZg/pvzw59m1i/cTOr122kfd0GWnbcLu1qiYjUnPJOEZFk7VV0Mn3pqvo9mZ7VMTUvBfYHdgb+oYLtbyR0aF7k7hPd/XJ3Hwd8h/AwoK+X29HMvg0cD5zu7puqrrmIiIhIL5hZp3E116VYGxERERFpVHtlZEzNTHZquvtcd3/BKxgwxsyGA6cAS4AbOq2eBnQAk81sm0c5mdl3gLOAce6+uOqKi4iIiFSheFzNV9+u3wRTRERERLJr761uP6/fKzWzevt5T4yL5ne7++biFe7ebmYPEzo9jwXmFNaZ2XXAp4A2d3++0oOZ2bwyqw5sb2/nvvvu63L/5uZmmpubaW9vr/SQPbZpU7jgNMlj1LO8xw96Deo9fndn06ZNrFmzptvvjN4oxJ1E2Vmg+PMdP2T7NTh8p40MP3h7Wnc0Ov6ykPtef67HZVQSf71+P4qISHzy8FApEemdDwzYkUs/sj97t/Zj79bm7ndISR46NQ+I5n8us/4FQqfm/kSdmmZ2AzAZmAi8ZWa7R9u+6+7vJlfV923evJmmpkxeSCsiVVKCKSLlHL17HlK3fDGzqcAkQs66HngMmOruC2pwbNxdeadIThVyTjNLuSYiUm/69mni4o/sl3Y1upWHzHhANH+nzPrC8oFFy74YzedsvSkzgOldHczdjyy13MzmtbS0jGpra+tqd15++WXWrVtHU1MTLS0tXW7bW4WrL5Iqv97lPX7Qa1Dv8be3t9OnTx+GDBnCPvvsE3v5hauzuvs+alSK/z4gv/GDXoNK4q/X78cG1UYY//0JwIArgHvM7GB3X5XkgXfYYQfWrVtHR0eH2lwkhzo6OoDwXSAikkV56NTsTuG01JZLo9w9tVNVLS0trFu3jtdffx2A/v37Y2Y6eybS4Nwdd6ejo2PL51//YIqIND53H1/8u5lNJpx0HwvMTvLYyjtF8kc5p4g0kjx0ahauxBxQZv3OnbaLjZm1AIW/ENtt3ry5q80BaG1tpaOjgzVr1rBs2bK4qwS8P55gnz59Eim/3uU9ftBrkIX4m5ubaW1tTbsaIiK5Z2ZnACcCI4HDCbndL9z97C722YtwxeUEYBCwHLgdmOHub3VzyBbCwzy7265qleadWfi7KeWp/bIt6fZTzikiWZaHTs1F0Xz/MusLgwSUG3OzGl8mPGEdgDfeeKPbHZqamth7771ZtWoV7e3trF+/Pvbx9dasCU9LzesZubzHD3oN6jV+M2OHHXagpaWF1tZWjW8mIlIfvkbozHwXWAYc2NXGZjYCeATYFZgFPA+MBi4GJpjZWHdf2UUR1wFPAY9WXfNuVJp31uvfTamM2i/bkmg/5Zwi0ijy0Kk5N5qfYmZNxU9Aj66kHAusJQzKHrdvAT+Ifv79brvtdlglOzU1NTF48GAGDx6cQJXeH0tr9OjRiZRf7/IeP+g1yHv8IiLSI5cSOjNfJFyxObfrzbmR0KF5kbtfX1hoZt+Oyvo68IVSO0bbHA8c7+6bqq969yrJO/V3M9vUftmm9hMRKa/hT8m4+0vA3cAw4EudVs8A+gM/c/eOBI7d7u6vuftrwAadARMRERHJFnef6+4veAW3zpjZcOAUYAlwQ6fV04AOYLKZ9S+x73eAs4Bx7r646oqLiIiINLhMXqlpZhOBidGvu0fz48zs5ujnFe4+pWiXLxJuA/qumZ0MPAccA5xEuO38nxOqZ4/H1BQRERGRzBoXze8uvjsIwsluM3uY0Ol5LDCnsM7MrgM+BbS5+/OVHszM5pVZdWB7e/uWK7yq1d7eDhBbeVJbar9sU/tln9ow29R+ySu8xr2RyU5NwkDt53ZaNjyaAF4BtnRquvtLZnYU7w/YfiphwPbvEgZsX5VQPXs8pqaIiIiIZNYB0bzcWO0vEDo19yfq1DSzG4DJhBP2b5lZ4YT9u+7+bnJVFREREcm2THZquvt0YHoP91kK/H0S9elCr8bUFBEREZFMGhDN3ymzvrB8YNGyL0bzOVtvygy6yXfd/chSy81sXktLy6i2traudq9Y4eqUuMqT2lL7ZZvaL/vUhtmm9kteNQ9Cy2SnZla4ezvQDmBmGlNTREREJN8smm8Zn9Pdrcy2IiIiItIFdWqKiIiIiMSjcCXmgDLrd+60XWw0lruIiIjkjTo1E6TkUkRERCRXFkXz/cus3y+alxtzsxoay11ERERyxdy9+62kV8xsOkXJZd++ffnQhz6UXoUihSdLVTNuQZblPX7Qa6D4FT8o/rzGD3oNKon/ueeeY+3atavcfVCt6pUFZtYGzAV+4e5nl1g/AngRWAKMKH4CenSyeznQBAxx946Y61Z8Mv3Zfv36DTzooINiKTvvn5msU/tlm9ov+9SG2ab2S141eaeu1ExW8YOCHtu4ceNO8+fPfznNCkUOjObPp1qL9OQ9ftBroPgDxZ9PeY8f9BpUEv8wYHXyVWks7v6Smd1NeML5l4Dri1bPAPoD34+7QzM6dvFY7m+vXbt28/z585fEVHzePzNZp/bLNrVf9qkNs03tl7xh9DLv1JWaOWRm86D8EzMbXd7jB70Gil/xg+LPa/yg1yDv8feUmU0EJka/7g6MBxYDD0bLVrj7lKLtRwCPALsCs4DngGOAkwi3nY9x95W1qHtc9J7JNrVftqn9sk9tmG1qv/qmKzVFRERERMobCZzbadnwaAJ4BdjSqRldrXkUcAUwATiVcNv5d4EZ7r4q6QqLiIiI5IE6NUVEREREynD36cD0Hu6zFPj7JOojIiIiIkFT2hUQERERERERERER6Ql1aoqIiIiIiIiIiEimqFNTREREREREREREMkVPPxcREREREREREZFM0ZWaIiIiIiIiIiIikinq1BQREREREREREZFMUaemiIiIiIiIiIiIZIo6NUVERERERERERCRT1KkpIiIiIiIiIiIimaJOTREREREREREREckUdWqKiIiIiIiIiIhIpqhTs0GY2V5m9hMze83M1pvZEjO71sx26UEZZ5jZ9Wb2oJmtNjM3s58nWe+4VBu/mQ0ys8+a2W1m9qKZrTWzd8zsITP7jJnV9Wclpva/yszmmNnSKP5VZvakmU0zs0FJ1r9accRfoszJ0WfAzeyzcdY3bjG1/5KieDtPrydZ/zjE+R4wsxPM7Ndmtjwqa7mZ3W1mpyZR9zjE8B14XhftX5g2JR1Hb8XV/mb2saitl0Xfg4vN7FYzOy6puschpu8AM7PzzewxM2s3szXR34CLzKxPkvWX2ovxMxP731
/pXgzf+ZnOextB3nPXrMt73pl1ec8bG425e9p1kCqZ2QjgEWBXYBbwPDAaOAlYBIx195UVlPMUcDjwLrAMOBD4hbufnUzN4xFH/Gb2BeAmYDkwF/gLsBswCRgA/Bo40+vwAxNj+78HzAcWAn8F+gPHAkcBrwHHuvvSJGKoRlzxdypzb+BPQB9gJ+ACd/9RnPWOS4ztvwQYCFxbYvW77v7NeGocvzjfA2b2NeBfgBXAnYTvhMHAEcBcd/9K7AFUKabvwJHAxDKrTwDGAb9197+Np9bxifEzcBXwFWAlcDvhPbAv8AmgL3COu9fdib4Y4/8ZMJnw/T8b6AA+AhxMHf8NlJ6L8T0T+99f6V7e895GkPfcNevynndmXd7zxobk7poyPgF/ABy4sNPyb0fLv1dhOScB+wEGtEX7/jzt+GoRP+Ef9o8DTZ2W705I9Bw4Pe1YE27/Hcss/3pUzo1px5pk/EX7GXAP8BJwTVTGZ9OOswbtvwRYknY8Kb8GZ0bb/zfQUmL9dmnHmmT8XZT/aFTOJ9KONan4o+/6TcDrwK6d1p0UlbM47VgTjH9iIUZgcNHy7YDbonXnpR2rpvp5z8RZjqbatx8ZznsbYcp77pr1Ke95Z9anvOeNjTilXgFNVTYgDI8+NC+XSExaCFdddgD9e1huGxno1Ewq/k7lfDU6xvVpx5tS/IcX/uCmHW8t4gcuBjYDHwam13NiGGf8ZLRTM67XgDAcy+Jo2yFpx5XGe6BM+YdG5S8D+qQdb4Ltf0xUzqwy61cD7WnHm2D8P4vK+VIX74F5acerqa7eM4nnH5rSed2p47y3EaYk2pAM5a5Zn/Ked2Z9ynve2KiTxkvJvnHR/G5331y8wt3bgYeBZsJtxI2oFvFviOYbqygjKbWI/+PR/JkqykhKrPGb2UHAN4Dr3P2BOCuakLjbfwczO9vMvmpmF5vZSRkYSy+u12AMsA9wF/BWNEbOZdHrUM/j4iT9HfD5aP5jd6/HMTXjiv8F4D1gtJkNLl5hZh8mJLr3xFLjeMUV/+7RfHGJdYVlo8xsYC/rKfUjrvdM3vPPtOQ9720Eec9dsy7veWfW5T1vbEjq1My+A6L5n8usfyGa71+DuqQh0fjNrC9wTvTr73tTRsJij9/MppjZdDP7jpk9SBjn5RlCwlRvYos/autbCLddfbX6qtVE3O2/O+E1+DphbM17gRfM7MTeVrAG4noNjo7mbxDGlr2T8J6/FnjEzO43syFV1DMpiX0Hmlk/4GzC1R/1Oi5XLPG7+yrgMsKYcgvN7Adm9v/M7JfA3YRbwz7fVRkpiav9V0TzfUqsG17084EV1kvqV1zvmbznn2nJe97bCPKeu2Zd3vPOrMt73tiQ1KmZfQOi+Ttl1heWD0y+KqlIOv5vEG69u8vd/9DLMpKURPxTgGnAJcDxhKT2FHd/sxf1S1qc8f9fwqDc57n72irrVStxxj8TOJnQsdkfOAz4PjAM+J2ZHd7rWiYrrtdg12j+BaAf4QEpLYTP/x8It3Td2utaJifJ78C/i/b7ndfhQ8IiscXv7tcSHpLRF7gAuJww3tVS4GZ3/2s1FU1IXPHfGc3/0cxaCwujf5hnFG2nJ1pnX1zvmbznn2nJe97bCPKeu2Zd3vPOrMt73tiQ1KnZ+Cyae6q1SE+v4zezi4AvE56INjnOStVQj+N3993d3QidW5MIV+k8aWajEqhf0iqK38xGE85wf8vdH028VrVTcfu7+wx3v9fd33D3Ne6+wN2/QBg0ux9hjKYsqvQ1KNxmb8AZ7j7H3d9192eB0whjSp6YwVuCqvkb8Llo/v2Y6pKGiuM3s68AvwJuBkYQOvePJNx+/QszuzqhOiap0vj/E/gdIe7CFQfXAk8Bp/L+lQv1OASBxCuuvDHv+Wda8p73NoK8565Zl/e8M+vynjdmkjo1s69wNmFAmfU7d9qu0SQSv5l9CbgOWAicFF1iXo8Sa/+oc+s24BRgEOFBEvWm6viLbt35M/B/4qtaTdTi8/+9aP7hKspIUlyvwVvRfLG7P128Irr6oXDFyuge1zBZSX0HHkwY72kZYbynehVL/GbWBlwF3OHu/+jui6PO/fmEfy5eBb5sZsO7KCYNscQfjSv1CcKV+q8TOjTOJ7T/8cDKaFNddZB9cX1n5D3/TEve895GkPfcNevynndmXd7zxoakTs3sWxTNy437sF80LzduRNbFHr+ZXQL8G7CAkNi93uvaJS/x9nf3VwhJ7iGdB0KuA3HEv1O0/0HAOjPzwkS4DR/gh9Gya6utcMxq8fkvdGL0r6KMJMX1GhTKebvM+kLy2a+yatVMUu+Ben9AUEFc8f9tNJ/beYW7rwEeJ+RMR/S0ggmLrf3dfaO7f8vdR7p7P3ff2d0nEL7/RwJrgWerrbCkLu7vzLzmn2nJe97bCPKeu2Zd3vPOrMt73tiQ+qZdAala4YN0ipk1FT/Fy8xagLGEf0QeS6NyNRBr/GZ2GWE8oaeAj7r7iq73SF2t2n+PaF5vnRtxxL8e+HGZdaMIf4weIvwRrLfbe2rR/oXbXko9FbkexPUaPEB40ut+Zra9u7/Xaf2h0XxJ9VWOVezvATPbkXCl3mbKfzbqRVzx7xDNyw3KX1je+X2Rtlp8B0wGdgR+6u4buttY6l5c75m8559pyXve2wjynrtmXd7zzqzLe97YmNxdU8YnwuXpDlzYafm3o+XfK1q2HeHppSO6KbMt2vfnacdXq/gJt2848D9Aa9px1TL+aNnuJcpuIjwJ24GH0441yfYvU/b0qIzPph1nwu1/SKn3PDCUMJaeA19NO9ak3wPAz6Ptr+y0/KOEDr63gYFpx5tU/EXbTI72m512bLWKn/BQJCfcer1np3V/E7X/WmBQ2vEm1f7AziWWHQ2sAtqB4WnHqqnu3jMVl6OpLtsvk3lvI0xx/93uVMZ06jx3zfoU42cwk3ln1qe8542NOFn0wkuGmdkI4BHCU9RmAc8BxwAnES6dHuPuK6NthwEvA6+4+7BO5UwEJka/7g6MJ1yd9WC0bIW7T0kukt6JI34zO5cwyO8m4HpKj6OxxN1vTiiMXosp/kuAawhnDV8ijJ+2G3Ai4UFBrwMnu/vCWsTUE3G9/8uUPZ1wG88F7v6jBKpftZjafzrhiX1zo/XthAGvP0a4Qusu4DTf9ixyXYjxO3BX4GFgX8L33uOEjt3TCInLp9297p5EGfdnwMweJIyj+Al3n510/asV02egiZDkfoTw/r+N8L13EOEWIwMucffrahJUD8T4/v8jIQFfQHgNDiE8JGg9MMn1JOSGEeN7puJyJD55z3sbQd5z16zLe96ZdXnPGxtS2r2qmuKZgL2BmcBywmXOrxAG/G7ttN0wwpfkkhJlTI/WlZu22adepmrjryB2B+5LO84E4z8UuIFw+9EKwu0Q7wBPRK9NXZ/Bj+P9X6bcwvuirs92x9D+JwL/QXji6dvABuBN4L+BcyCcAKvnKa73ANBKOFP7clTOSkLCc2zaMdYo/oOi9UuBPmnHVcv4CWfjLyHccrQ6+h78K3AncEraMdYg/n8C5
kXfAeujz8D3gGFpx6epPt8zPSlHU321HxnPexthiuszWKLcQtvWde6a9SnG79BM5p1Zn2LKmzKbNzbapCs1RUREREREREREJFP09HMRERERERERERHJFHVqioiIiIiIiIiISKaoU1NEREREREREREQyRZ2aIiIiIiIiIiIikinq1BQREREREREREZFMUaemiIiIiIiIiIiIZIo6NUVERERERERERCRT1KkpIiIiIiIiIiIimaJOTREREREREREREckUdWqKiIiIiIiIiIhIpqhTU0RERERERERERDKlb9oVEBFJg5kNA14Gfuru56Vbm2SZ2XbAGOAAYBCwAlgMPODuG9Ksm4iIiEgjU86pnFNEkqNOTRGRKpnZEgB3H5ZuTbZmZoOAy4HPATuX2GSlmf0EmOHuHT0seyBwATASOALYH+gDfNTd7+livz7AhcD5wH7AWuAx4Ep3f6QndRARERHJE+WcyjlFZGvm7mnXQUSk5qIzySOAd9x9eZVlLYH6SjDN7HjgV0ArcAvwS+BJ4C3CmfNDgL8DzgOWAx939z/1oPyRUXkAy4DtgN3oIsE0M4vqcQawCJgd1e+TwI7A6e4+qwdhioiIiNQ15ZzKOUUkOerUFBGpUr0lmGZ2HDAXeIGQtP25i20PBP6LkBye4O4vVHiMXYBRwJPuvsrMbgbOpesE8yzg34FHgJPdfV20/GjgIeAdYIS7t1cUqIiIiEiOKOdUzikiW9ODgkQkl8xsmJl5lBgVL785Wj7MzD5vZn8ys3Vm9oaZ/cDMBhRt22ZmDgwFhkb7eZlyD4zKXmpm66Py/t3MDihRt0IdhpvZhWb2jJmtNbP7KohrZ+DXwDPAmK6SSwB3fx4YB6wEbonObHfL3d9y9znuvqqS7SP/EM2/Vkguo7KeICS5Qwhn1EVEREQagnLOQDmniCRBnZoiIqVdHU1PAzcArxLG87mtaJslwAzC2d53op8L0+2FjcxsAjAf+N/AE8B1wBxgEvC4mY0qU4frgH8B/hT9/HAF9Z5CGMvorMLZZzP7oJn9ysxWR9MdUcL7oplNd/eVhPGGjgEmVHCMHjOzHQgDx68BHiyxye+i+bgkji8iIiJSp5Rzxkg5p0i+6EFBIiKlHQsc5u5/ATCzvsC9wElmNtrdH3f3JcB0MzsPwN2ndy4kumXmPwiJ1YfdfWHRukOAPwI/ItxW09ko4Ah3f7mSCkdnvD8D3OLuLxUd/0Fgb2AW4QmUxxNuvdlyYsvd/2hm84AzeT/Zi9O+hEHdF7v7xhLrC7cg7Z/AsUVERETqlXLOeCnnFMkRXakpIlLaFYXkEiBKimZGv47uQTnnAAOBacXJZVTms8APgSPM7OAS+15daXIZOQzYgzCGUMGlwAeBC9z9NHf/MnAc8Adgl077Pwoc3oPj9UThFqp3yqwvLB+Y0PFFRERE6pFyzngp5xTJEV2pKSJS2v+UWLY0mndOzLpyXDQ/3Myml1hfOEt8ELCw07rHe3AcgGHRfFHRso8SnjT5k8ICd99sZlcCn+60fwfQ0sNjxqUwrpKeXiciIiJ5opyztpRzijQQdWqKiJT2dollhVtY+vSgnEHR/IJuttupxLLXe3AcgOZoXnxmegjwF3fvnLgtKbH/3sBfe3jMShXqNKDM+p07bSciIiKSB2+XWKacs/eUc4rkiG4/FxFJViFhOtzdrYvppyX27ekZ5Dej+QeKlq0g3ArU2VbLzKw/MB54oIfHrNSLwCZgeDRWVGf7RfMun5wpIiIiIiUp5wyUc4rkiDo1RUSqt4nyZ9Ifi+Yn1KAezwCbgbaiZfcAHygMLA9bBne/vOj3PsD1wI7AjUlUzN3XA48QzuyXei3+Jprfm8TxRURERBqAcs5uKOcUyRd1aoqIVG8lMMTM+pVYN5NwW9E0M9tmsHczazKztjgq4e5vEhLazxUtvhZ4DfiJmf3GzL4JPAx8HHgLOBV4FvgU8Gl3XxZHXcq4KZpfaWY7Fhaa2dHAJwln/X+d4PFFREREskw5Z2WUc4rkhMbUFBGp3hzgaOD3ZvYAsB542t1nu/tKMzsDuA14zMzmEBK6zYTbcY4jjIG0Y+mie+xfgTvN7DPu/mN3X2FmJwDfIgzgDnA/cDwhmRsc1f+b7r6oZIllRMnq4OjX46P5P5nZ2dHPt7v77UW7/CcwCTgDeNLMZhNi/yThqoML3H11T+ogIiIikiPKOZVzikgRdWqKiFTvSmAg4Uz0WEKy9FNgNoC7zzGzDwFTCGMInQC8RzibfS8xnil299+a2X8BN5rZCnef5e6LgdNKbH5IlYc7AxjaadkpRT8vAW4vqpub2VmEW4LOBy4E1hHGVLrS3R+psj4iIiIijUw55/uUc4oItu3DyUREJMuiW5J+Sxjn6N8Iyds2T5g0s92AiwDc/Z9rWUcRERERyTblnCKSNnVqiog0oOhpj/8KXAwY8CiwgDDW0gDgcOBY4F3gK+7+w3RqKiIiIiJZpZxTRNKkTk0RkQZmZkOBzxBu0RlBSC7fAp4G7gRmunt7ejUUERERkaxTzikiaVCnpoiIiIiIiIiIiGRKU9oVEBEREREREREREekJdWqKiIiIiIiIiIhIpqhTU0RERERERERERDJFnZoiIiIiIiIiIiKSKerUFBERERERERERkUxRp6aIiIiIiIiIiIhkijo1RUREREREREREJFPUqSkiIiIiIiIiIiKZok5NERERERERERERyRR1aoqIiIiIiIiIiEimqFNTREREREREREREMkWdmiIiIiIiIiIiIpIp6tQUERERERERERGRTFGnpoiIiIiIiIiIiGTK/wfItrI2PSX7lwAAAABJRU5ErkJggg==\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "image/png": { - "height": 440, - "width": 666 - }, - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "di = 0\n", - "pyplot.gcf().set_size_inches(11, 7)\n", - "for dsname in \"bigann-1B deep-1B msspacev-1B msturing-1B\".split():\n", - " \n", - " di += 1\n", - " pyplot.subplot(2, 2, di)\n", - " \n", - " # the PQ 4-bit code does not expose enough statistics to do this computation\n", - " for t in \"PQ8bit\", : # \"PQ4bit\": \n", - " \n", - " suf = \"PQ64\" if t == \"PQ8bit\" else \"PQ128x4fsr\"\n", - " \n", - " indexkey, res, keys, stats = parse_result_file(find_latest_version(\n", - " f\"../logs/{dsname}.IVF1M_2level_{suf}.b.log\"))\n", - "\n", - " r10 = res[:, 0]\n", - "\n", - " # simulate QPS w/ 2 shards \n", - " t_per_q = res[:, 1]\n", - " # coarse quantization time\n", - " quant_time = t_per_q * res[:, 3] / 100 \n", - " # half the time to scan inverted lists \n", - " simulated_t = (t_per_q - quant_time) * 0.5 + quant_time\n", - "\n", - " qps = 1000 / simulated_t \n", - "\n", - " pyplot.semilogy(r10, qps, label=\"2x500M, 64 bytes \")\n", - "\n", - " suf = \"PQ32\" if t == \"PQ8bit\" else \"PQ64x4fsr\"\n", - " indexkey, res, keys, stats = parse_result_file(find_latest_version(\n", - " f\"../logs/{dsname}.IVF1M_2level_{suf}.b.log\"))\n", - " \n", - " r10 = res[:, 0]\n", - " # * 2 = we have two machines\n", - " qps = 1000 / res[:, 1] * 2\n", - "\n", - " pyplot.semilogy(r10, qps, label=\"1x1B, 32 bytes\")\n", - "\n", - " pyplot.title(dsname)\n", - " if di >=3: \n", - " pyplot.xlabel(\"inter @ 10\")\n", - " pyplot.ylabel(\"QPS\")\n", - " pyplot.legend()\n", - " pyplot.grid()\n", - " # pyplot.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e22b90ec", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/eval_t2i_results.ipynb b/notebooks/eval_t2i_results.ipynb deleted file mode 100644 index ef293ad6c..000000000 --- a/notebooks/eval_t2i_results.ipynb +++ /dev/null @@ -1,234 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "936ec4fe", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from matplotlib import pyplot" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "00d04ee8", - "metadata": {}, - "outputs": [], - "source": [ - "%matplotlib inline\n", - "%config InlineBackend.figure_format='retina'" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "8499fc2c", - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "sys.path.append('../')\n", - "\n", - "from track1_baseline_faiss.parse_results import parse_result_file, find_latest_version" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "38e8d46b", - "metadata": {}, - "outputs": [], - "source": [ - "dsname = \"text2image-10M\"" - ] - }, - { - "cell_type": "markdown", - "id": "3820d8b4", - "metadata": {}, - "source": [ - "# Results on IVFFlat indexes\n", - "\n", - "This is to see how it performs without compression." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "ef3aa64a", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwcAAAILCAYAAAC97eCAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAABYlAAAWJQFJUiTwAADIS0lEQVR4nOzdd3wUx9nA8d9c06l3CTUQAokqqsF0g3tN3OKSuL4uiUucxHbe9P6mOHHvvcUlcYnjio0bpndMB0kggVDvOul0ujbvH3ucJCSKjA4heL6fz310Ozu7O7uJ7X1unplRWmuEEEIIIYQQwtTfDRBCCCGEEEIcGyQ4EEIIIYQQQgASHAghhBBCCCECJDgQQgghhBBCABIcCCGEEEIIIQIkOBBCCCGEEEIAEhwIIYQQQgghAiQ4EEIIIYQQQgASHAghhBBCCCECJDgQQgghhBBCABIcCCGEEEIIIQIkOBBCCCGEEEIAYOnvBpwolFLFQAxQ0s9NEUIIIYQQx7dsoFlrPbS3B0pwcPTEhIeHJ4waNSrhaF/Y4XAAEB0dfbQvPSDJ8+odeV69I8+rd+R59Y48r96R59U78rx6pz+f17Zt22hra/tGx0pwcPSUjBo1KmHt2rVH/cILFy4EYO7cuUf92gORPK/ekefVO/K8ekeeV+/I8+odeV69I8+rd/rzeU2ePJl169aVfJNjZcyBEEIIIYQQApDgQAghhBBCCBEgwYEQQgghhBACkOBACCGEEEIIESDBgRBCCCGEEAKQ4EAIIYQQQggRIMGBEEIIIYQQApDgQAghhBBCCBEgwYEQQgghhBACkOBACCGEEEIIESDBgRBCCCGEEAKQ4EAIIYQQQggRIMGBEEIIIYQQApDgQAghhBBCCBEgwYEQQgghhBACkODg+NdSjfJ7+rsVQgghhBBiALD0dwNEiD0+nVOctXgskbApDSJTIDIJolIgMtn4Hhn4HhXYFxYDSvV3y4UQQgghxFEmwcHxzO8DZx0AVm8r1BUZn0Mxh3UEDsEgotMnat/3FIhIBLP830gIIYQQ4nggb3U9UEr9HvjdfsVVWutB/dCcb87VBJHJ6NZaFP7DP87XDs17jc/hCE84QE9EcvfAIizqm92LEEIIIYQIOQkODmwHMLfTtq+f2vHNRSTATwv56ssvsHpamDkhF1proKUaWmuhtdrYbq0NlNUYH4+zd9dpqzc+NdsPXdcaEQggkrumOKWMhhHngi3im92rEEIIIYQ4YhIcHJhXa13Z343oE8qExxYDKaOAUYeu724NBBGBYOFAQURrDTjrAX34bfE4oXGP8dmfLRryL4GJV0PGZBn3IIQQQghxlA3I4EApdSlwCjABGA9EA69qra86yDGZwB+Bs4FEoAL4L/AHrXVDD4fkKKXKADewEvil1npXH97GscsWaXzisw9d1+c1xjUEg4jOAUSn3omWwH6f+8Dncjtg7YvGJ3kUTLwKxl1upCcJIYQQQoiQG5DBAfBrjKCgBdgLjDxYZaXUMGAZkAK8C2wHpgI/As5WSs3UWtd1OmQlcF2gXkrgesuUUmP2qyfMFohONT6HojW0NweChpqOIKK5DLb8F+p3dtSt2QYLfgWf/Q5GnGP0Jgw7TQY/CyGEEEKE0EB90/oJRlBQhNGD8OUh6j+O8ZJ/h9b6kX2FSqn7A+f6M/CDfeVa6/mdD1ZKrQB2AdcC9/dB+09MSoE91vgkDuu679TfwJ4VsP6fsOWdjnEPfi9se9/4RKfB+CuNHoX9jxdCCCGEEEdsQC6CprX+UmtdqLU+ZLK7UioHOBMoAR7bb/fvgFbgaqVU5EGu1wJsAXK/caPFwSkFQ6bDhY/D3QXwrUcgc2rXOo4KWHI/PDIJnj8Hvn7NGB8hhBBCCCH6hDqM9+tjmlJqLkbPQY9jDpRSNwLPAE9rrb/fw/5PMIKH07XWnx/gGnagGHhCa/3HQ7Rn7QF2jczNzY14+umnD3Z4SDgcDgCio6OP+rWPVERrKYMqP2dQ5RfYPE3d9nvN4VSnzKZy0Ok0x+T1ySDmgfy8+oM8r96R59U78rx6R55X78jz6h15Xr3Tn8/r5ptvprCwcJ3WenJvjx2oaUW9MSLwt+AA+wsxgoM84HMApdS9wPvAHox0pN8AkcBLIW2p6MYZmcWuYddRPPQqEurXklbxGYl1a4LrNlh8baRXLCC9YgGtEVlUpJ1O5aBT8Vpj+rnlQgghhBADz4kQHMQG/nb/2blreVynskzgdSAJqAFWANO01rsPdbEDRWhKqbXR0dGT5s6dexhN7lsLFy4EoD+u3bdOB34GjirY8DqsfwXqCoN7I52lDN/5AsMrP4TrPoDkEQc+1UEcP8/r6JDn1TvyvHpHnlfvyPPqHXlevSPPq3f683kdSW/FgBxz0Mf25aEE86u01ldordO11jatdYbW+hKt9dZ+ap/YX3QqzPox3L4a/meBMUDZ2mnISGs1vPQtqD8xZp4VQgghhOgrJ0JwsK9nIPYA+2P2qycGAO3z4dq2jfqvCvGddo8xiPmCh8EWZVRoqYSXvg2Npf3bUCGEEEKIAeRESCvaEfibd4D9+2YgOtCYhG9MKRWNsUAbgNXv9/f1JU5Yu6+5lra1xthvW3Y2UbNnweRrjSlOX7kEvC5o2gMvfxuun3946zAIIYQQQpzgToSeg31rIJyplOpyv4GX95lAG8a4gr52F1AW+ORXVVWF4BInJvuoUcHvznWdJojKngVXvApmm7Fdv9MIEFpl7TohhBBCiEM57oMDrfVOYAGQDdy23+4/YMxC9LLWOhQT5t8HZAQ+m1JT5dfrvhIxeVLwe9vadV13Dj8dvvMiKLOxXbMN/nkhtDUereYJIYQQQgxIAzKtSCl1IXBhYHNQ4O90pdSLge+1Wuu7Ox1yK7AMeFgpdRqwDTgZmIeRTvSrULRTa+0AHIE2e0ym4z4WO2rCJ3UKDjZuRLvdKJuto8LI8+Dip+HtGwENlRvh1e/A1e9AWNTRb7AQQgghxAAwUN9WJwDXBj5nBcpyOpVd2rlyoPfgJOBFjKDgLmAY8DAwXWstOScDjDU1FWtmJgDa5cK1bVv3SvmXGist77N3Fbx5LQzwhf+EEEIIIUJlQAYHWuvfa63VQT7ZPRxTqrW+XmudFpiidIjW+kda6/p+uAXRBzqnFjn3Ty3aZ9LVcO69HdtFnxm9CEIIIYQQopsBGRwMFEqpaKVUulIqHZmtqM+FT+pYb67LoOT9Tb0JRl/YsV30WegaJYQQQggxgElwEFoyW1EI7T8oWR8sXWjEOR3fiz4PYauEEEIIIQYuCQ5CS2YrCiFbTg7mWGNtO19DA+7ikgNXHnZqx/fSleCSNe+EEEIIIfYnwUEIaa0dWutyrXU5ILMV9TFlMnWZtci5ds2BK0elQNp447vfC8WLQt
w6IYQQQoiBR95WxYDWZVDy8uX4Ww+yXMXwMzq+F34awlYJIYQQQgxMA3KdAyH26Twoufmj+TR/NB9TRATm5CQsyclYkpKxJAW+qzAs5WFYwn1YNn6G+VwvyiL/CAghhBBC7CNvRiGklIoGogObMltRCNjHjsEUFYW/pSVY5nc68e/eg2f3nh6OSAz81fDaeMyJCV0DiKQkwpsa8cfE4IyKCpaZIiOPyv0IIYQQQvQnCQ5C6y7gd/s2ZLaivmey2Uj/x9+pe+45vOUVeGtr0W734R3s9+OrqcVXU0t7p+KYwN/dzzwbLFMREViSk7oFEpbkzt+TMSckoMzmPrs/IYQQQoijSYKD0LoPeDrw/ePU1NT8/mzM8Sp63jyi580DQGuN3+HAW1ODt6YWb22t8b22Bm9NDb5dm/CWFuJtM+FzH/5LvHY68RywN6ITkwlzQkJHwHCAQMKclIw5SnojhBBCCHFskeAghLTWDsABoJSS2YqOAqUU5pgYzDExhA0b1r1CUxk8MBoAjQ3vyT/H2+rD2+LB62jH2+Sismg3tLQRrc14Gxrx1tb3rjeithZfbdfeiB7bGhHRQwCRHOihCPREJCVhSUiQsRFCCCGEOCrkjUOcWGIzIGU0VG9F4ca68o9Y96uSnN11W2vwexTe9jC83nC8bjvedivedgu+NhNeJ3idfrwtXnxO72E3RTudePbswbPnMHsj9gUSiYmYExOxJCZiSUrEnNDpb0I8yrr/HQkhhBBCHB4JDsSJZ+wl8MXWw66uFJhtGrPNRRiug9bVPvC2m/C6zHjbuv71uUx428x4A3+1Xx1eA3rRGwFgjo7AnBCPJSkFc0oqloSuQUTn4MIUEXF4bRBCCCHECUGCA3HimXEH/ohkXnhvAVZ/O+G0E67aCcdNROC7HTcRuIg0eYhQ7YTpdkwcerYpZQZrhB9rxMHrBnsjOgcRLpPRE7F/WXvvBjj7HE58Difu3WWHbm94eCBYSMCSmIQlMcEIHBISsWUPIXLaNJTN1qvrCyGEEGLgkuBAnHgsNppHX8nbS3Morm2lzeM7jIM0NrxGIEE7MRYPw+IUQ2NMDImBjChIj9Ck2v1EqHaUxwmetsCnNfDXCW6jXHlaMXvaMHvaCHMH9nt7XsBN+zF6GwJBg89lDvROmIzeCJcZ377tdhNwmD0SgG5rw7N3L569e3vcb46LI+b884m98ELsY0aj1OGfWwghhBADjwQHISTrHBy74iJsfPSj2WitqWpuZ1dNC7tqW9lV08qagj1UtvqpbdP49b4jFG6suLHSRBSVXiioBWq7nzs23EpOciRDkyLJSYokJyOKoUnGtt16kF4Avx+8gYDC3RFQKI8Tq6cNa6cygsHHvoDD+Oh2J76mZmNmpuoKvK1evO3dgwgjsDh0apOvsZGGV16h4ZVXCBucSuw5pxFzyRVYB+d+00cvhBBCiGOYBAehJescHOOUUgyKtTMo1s6M4UkALFxYDcCMWXPYU+9kV00LxYHAobi2lV21rdS2HDj7v6nNw/o9jazf09htX0ZceDBwGJoUSU5yFDlJkaTHhWM2mcAWaXwik77Z/WD8Q20BI3fJUQkNxVBf3OlvCTQUo1ur8XtVMHDY1/Owb0xES0UYXmfHvyLa91RR/dRrVD/9KpHpfuImJBA1eQRDXRacEVngnmK0XQghhBADlgQHoSXrHBwlWus+T3mxWUwMT4lieEpUt31NbR5KalvZVdtCcU1rsNfhUGlKZY1tlDW2sbiwa5eDzWIiOzEiGDAMTYpkWHIkQ5OiiI+wfrN7Uwpi0ozPkBndd7uaMDeUYK4vxtYlgCiB5r1ofxPOKhtNJRE0l9rRvsBUvFrRWmamtawJ04IVxGY7yRrvgIq34YIHYPjpvW+rEEIIIY4JEhyEkKxzcPQ8seEJPi75mBHxI8iLz2NEgvE3NSI1JHnyseFWxmfFMT4rrku51prKZtd+AYORslRa7+yUptSV2+unoKqFgqoWoGsPU+c0pWHJHSlKh0xTOhR7LKSNNz7787pRTaVE1hcT2VBMankBjmUbaFpbjrOsI/jxe0w0FEbhdZnJnLkHXrkExl0BZ/0FIhO/eduEEEII0S8kOBDHhW112yhuKqa4qZiPSz4OlseGxZIXn2cEDPEjyEvIY1jsMOwWe0jaoZQiLTactNjwYJrSPm6vP6RpSjlJkQztkqZ0BEGRxQaJw4wPYAbiLoQ4wF1aStObr9H0/od4KmoAcJSG46xpJSLZDRv/BUWfwTn3GNPGyiBmIYQQYsCQ4EAcFwobC3ssb2pvYnXlalZXrg6WmZSJ7JjsLj0Moexl2Kc3aUo7a1sp7oM0pZykKIYGex2MNKWEyCObmtSWlUXynT8j6Sf/S/ldd9P80UcAVO3IJjupwIgFnLXw9g2w8Q04/36IzTyiawohhBDi6JDgQBwX3vn2OxQ1FFHQUMCOhh3sqN9BYUMhDo+jW12/9rOraRe7mnZ162UYET+CiNYIMmwZJNclMzxuOGHmsJC3/3DSlDoCht6mKXUVF2ENpiXtS1PKSY4kO7F3aUpKKZLvvJOmBQtQXi+uvS04Bv+cmKZ/QnNgjYXCT+Cxk+H038NJN4Ck1gkhhBDHNAkOxHEh3BJOfnI++ckdY7611lS0VhgBQ/0OdjQYAcPu5t1our9VN7U3sapyVXD71Q9exazMwV6GvIS84JiGlIiUozLn/6HTlDqlJwXTlFqobXEf8JyNzp7TlJSC9NjepSnZMjNwnnoqkQsWAFD9+hdE/WcRpsV/g9XPBBraAh/dDZvehG89AskjjuyhCCGEECJkJDgIIVnnoH8ppUiPSic9Kp25WXOD5U6Pk52NO4M9DAUNBRQ0FNDi6f4ru0/72Nm0k51NO5lfMj9YHhcW1zGWIZCaNCxu2FHpZdjHSFOKZnhKdLd93yRNSevDT1PKCfQ2DE2KovXsswhfuhRTayuevXtpePsDEq+/F/Ivhfd+CLUFxklKV8KTs2Duz2HWnTIWQQghhDgGSXAQWrLOwTEowhrRYy9DeWs5BfUFfLLuE8o8ZTRYGtjTvKfHXobG9kZWVa7q0tMQ7GXo1MMwImEEyeHJR31l4aOZphRphcvHnMl3Vr0DQMWjj1E98zSGDp2C/QdLYNG9sOR+8HvB54bP/wi2KDj5+31920IIIYQ4QhIchJasczBAKKXIiMogIyoDtct4kZ87dy5Oj5OixqIu4xgOq5ehuKOXIT4sPpiWtG/WpGFxw7CZj2xg8DdxuGlKuzr1NBwqTanVAy8POpkZkYvJaK3F3NrCP//3Xl4fdWYgTel0puWN5bLyv5PcvBkA/elvUTnzIDkvpPcrhBBCiN6R4CCEZJ2DgS/CGsG45HGMSx4XLNNaU9ZSFhz8XNhQyI76HZQ6SnvsZWhob2Bl5UpWVq4MlpmVmaGxQ7ukJY2IH0FSeNJR72XY51BpSsW1Ri9DT2lKXpOFN/JO5Sfr3wAgp6m8a5oSFh7ip7xr+w2jTHtQXhc7nrySh4Y8zpCU2GCaUm5qNDF269G+dSGEEEIESHAgRC8ppciMziQzO
pNTB58aLHd6nBQ2FnYZx1DQUECrp7XbOXzaR1FjEUWNRXxU/FGwPD4svltaUk5sTr/0MnQWG25lQlYcE/ZLU/L7Nf9d8CWVrZq0UVMhEBykeVsxKbqkKbmx8hPPrbxr+zVhyssIXxF5BU/y4NZLu5wzOzGCMRmx5GfEMjY9lrEZMcRF9O/9CyGEECcKCQ6E6CMR1gjGJ49nfHLHisN+7ae8pZwdDTsoqC8I9jaUOkp7PEdDewMrK1aysqKjl8GiLGTHZnfpYRiRMIJEe2K/9TLsYzIpEuwmEuww46Sx7PyrUT7C5mb7n87pIU0pgaeqr+AO/ysA3G7+L1/4JrJRDwues6TOSUmdkw83VgTLMuPDjWBh3yc9hsSoozf4WwghhDhRSHAgRAiZlCnYy3Da4NOC5a2e1uD4hX1TrRY0FOD0Orudw6u9wV6GD/kwWJ5gT+gyY9KIeKOXwWrun7QcS1LHGAZvTQ1Ws+o5Tck/Fe/zRVj2rsCi/LwY9yx/zHiS7bVeiqpb8PYwKnpvQxt7G9qYv7kyWJYeaw8GC/kZsYzJiCElOjQrXwshhBAnCgkOhOgHkdZIJqRMYELKhGCZX/uNsQz1Ru/CvqBhb8veHs9R76pnRcUKVlSsCJZZlIWhcUM70pLiR5CXkEdSeFKP5+hLpshITBER+J1OtNuN3+HAHBPTQ0UzloufNKY1dbeQ0LabB5M+gKv+QrvXx45KB5vKmthc1szmsiZ2VDpw+7pPA1ze5KK8ycWCrR2zgKXGhAVSkTqChtSYsH7vYRFCCCEGCgkOhDhGmJSJrOgssqKzOG1I916Gfb0L+wKHNm9bt3N4tZfChkIKGwq7lCfYE7qMY8iLzwtJL4M5OQn/7j1GW2pqeg4OABKGwll/gffvMLbXvQxn/h9hFjPjMuMYlxkXrGpMo+pgS3lTMGjYVtFMu7d7wFDV3E5VczWfb68OliVFhTE2I8boXUiPJT8zlvRYuwQMQgghRA8kOBDiGHfAXgZHWZcehh0NOyhrKevxHPWuepZXLGd5xfJgmcVkISc2p0sPQ178kfUyWJKT8QSDg1rChg07cOVJ18Bnv4O2BnA7oLkM4rK6VbNZTMGegMunGGUen5+i6hY2lzUZn/JmtpY397jAW21LOwt31LBwR02wLCHSxpj0mGDvwtj0WLISwiVgEEIIccKT4ECIAcikTGTFZJEVk8XpQ04Plre4W7rMmLRvqtUeexn83uCYhw/4IFieaE/s0sPQm14GS1Jyx/lrag5SE2OF5KQRUBpIi6rd0WNw0BOr2cSotBhGpcXwnZOMY3x+za6ali4pSVvKm2h1dw8Y6lvdLC6s7bISdGy4lbEZMV3SkoYkRGAyScAghBDixCHBQQgppaKBfaMxrX5/9zQIIfpSlC2KiSkTmZgyMVjm1372OvZ26WUoaCg4YC9DnavugL0M+2ZKyo3PZUT8CBLDE7sca0nuFBzU1nJIyXkdwUFNAQw//eD1D8JsUuSmRpObGs3Fk4wyv19TXNca7GHYVNbElrJmHO3ebsc3tXlYWlTH0qK6YFl0mIXR6TFdZkoamhSJWQIGIYQQxykJDkLrLuB3+zaqqqoOUlWI0DApE4NjBjM4ZjBnDDkjWO5wO4yxDIGgoaC+gMLGQ/cyvL/r/WB5UngSSTqJDFsGLbtaGB7Z6ZhD9RyA0XOwT+2Ob3R/B2MyKYYlRzEsOYpvT8gAjIBhT72TzeUdwcKmsiaa2jzdjne0e1lZXM/K4vpgWYTNzJj0GGP8QoYxhiEnKRKLWRY5FEIIMfBJcBBa9wFPB75/nJqamt+fjRGis2hbNJNSJzEpdVKwzK/9lDpKu6UlHaiXobatllpq2e7azueLP+eUUj+3Bfat3foZrQXZjE8ez7DYYZhN5u4nSO4UHNQU9OHdHZjJpMhOiiQ7KZLzx6UDxqrXexvagr0Lm8uNtKT6Vne3451uH6tLGlhd0hAss1tNjE6L6bQOQyy5qVFYJWAQQggxwEhwEEJaawfgAFBKeUwmeVEQxzaTMjEkZghDYoZwZvaZwXKH29FtTYbChkJcPleX4xs79RzEbNnD5v/7HatiFS0JduKH5DE4dzJjsqcyLmU8sWGxkJTXcUAIeg4Ol1KKrIQIshIiOCc/DTAChoomV2AMw760pGZqW9q7He/y+Fm3p5F1exqDZTaLiVGDortMq5qXGt3tWCGEEOJYIsGBEOKQom3RTE6dzOTUycEyn99HqaOUd5a8Q5mnjPbodhyOzYCxUFmiAy5YpQENOIGvga9x2p5jRQy0JkZgTkslpi2ZNGsbmfZmwnZtwzIkD2XuoZfhKFNKkR4XTnpcOGeNGQQYAUO1o51Ne5vYXL4vaGimstnV7Xi318+GvU1s2NsULLOaFemRiuwYE3vtu8nPiGXEoGjs1v6/XyGEEAIkOBBCfENmk5ns2GwmRk5kIhOZO3cuep6fnV9cgmfr9gMeF+GGwbVArRN2FANWNFZKAT67GL/ZhE5OIDxzMOEZWVgz0rGmp2NJS8Oabnw3hYUdpbvsSilFaoyd1NF2Th+dGiyvcbQbwcLewDiG8mbKGruP3fD4NLubNbub/Xy1dzMAlsBA6rHpMeRnGr0MowbFEG6TgEEIIcTRJ8GBEKLPKJOJYW+8SduGDXjKyvCUV+ApL8NRWkzb3lJMVXVY3N1nCurM5PNDZS3uylrca9b1WMecmBgMFKz7goaMjm1TbOxRXbMgOTqMeSNSmDciJVhW19LOlvLmQLBgBA2l9T0N9tZsqzAWdntzrbEatklBbko0YzI6ZkoanRZDZJj8K1sIIURoyX9phBB9SlksREyeDJM7UpDSAn+11vgaG2ndU0xxwWoqdm6keccafLXNxDgguQliur8/d+Orq8NXV4dr06Ye95siIrBmdO5tyDCCiH29EMnJIU9dSowKY05eMnPyOqZ3bXJ6eHX+IkqafDjtSWwua6KkztntWL+GHVUOdlQ5+M86YzC4UpCTFNllWtUx6TFE2/t2lWshhBAnNgkOhBBHjVIKS3w8sfHxTBg/iQkALTXo1y6lonoTG8LCWGQKY7fHhsNpJd4BSU2a5CZIatYkNUNiM5j1wa/jdzppLyyivbCo5woWC9bU1GDvgyU9rVNPRDrW9DRMdnsf3z3ERlgZnWhmdKKZuXONWaKaXR62lDUHexc2lzWxq7YVvd89ag07a1rZWdPKf78uD5YPTYoMzJBk9DKMSY8lNkICBiGEEN+MBAdCiP4VlYy6+SvS21tIbyrlnMZSaNqDq6GYrfU72OgsY4O3iQ1mqLFYMPm1ETQ0Q3KTJrk5EDg0dWyHdV+yoCuv10h7Kut5ilYIpC51GudgfDqNe+ij1KUYu5XpwxKZPqxjQbmWdi9bA9Opbi4zBj8XVbfg7yEoKq5tpbi2lfc3dAQMgxMijNWeA9Oq5mfEEh9pO+K2CiGEOP5JcCCEODaERUHKKOMD2IFJgQ+AdrdRUfU1G8qWsqF2ExscJaxwN+Cl+0/sUW10CRqGNfjIbvST1AQRDgXth55WOJi6tHlzj/tNEREdPQ5pnQKI
DGPcgyUl5RunLkWFWZg6NIGpQxOCZU63l20Vji6rPRdWt+DrIWLYU+9kT72TjzZVBssy4sIZGxjDMCYwtWpSVP8M7BZCCHHskuBACDEgKFs46VnTSc+azjmBMpfXxbb6bWyo3sCG6vVsqP6amvZ6WiKgJQKKB+37Zb9rMBDX7mNarYcJdV5yG3ykNWnMrSY8rWY8TuODPnivgN/pxF20E3fRzp4rmM1YB6UagUNGOpbAYGlbbS2+hAT8LlevUpcibBYmD4ln8pD4YJnL42N7pSOw0rMRMBRUOfD4ugcMZY1tlDW28cmWjpXaB8XYA+MXOgY+p8b0fTqVEEKIgUOCAyHEgGW32JmYMpGJKROB64yFy1or2FCzwfhUb2B7/Xa8uusMSY1hZj7OMPNxRkdZtsfLOJeL8e1OxjnbyW724XdaugQMnlZzcFt7D9H74PPhKSvHU1YOazqK973a7/j9HzDHRGJNDaQvZWRiHTwMa1ZWcCC1OS7uoKlLdquZCVlxTMiKC5a1e30UVLawOTCGYUtZE9sqHbi9/m7HVza7qGx28dm2joAhOTqsY/xCoIchLdZ+VGd/EkII0X8kOBBCHDeUUqRHpZMelc45Q43+hS69C4GgoaatptuxJVYLJdYo3gssYhxpsjLWHMN4n2K8s5VxTTWkORsAjdbgd6tAwGDpFjh4Ws342g+dUuRrbsXX3IqrcA+wsvv9WE1Y4+1Yk2KxpiRgHTQIa0YG1sFDsQ4dgWVwHsoe2eWYMIuZ/MxY8jNjuTJQ5vH5KahysKXMmFp1c3kTW8ubae8hYKhxtPPljhq+3NHxjBIjbUzJTmB2XhJzcpPJSog45L0JIYQYmCQ4CCGlVDQQeNXA6vd3/w+xECK0uvYu0KV3YWPNRjbUbGBb/Ta8/q69C61+Dyv9dcYrux2wR5EdM4Zx8SMZHzWY8WHJDMeKva0BWmvBWQutNdBaB85a/E21eOqa8ATSlbz7BxCHkbqkPX7c1U7c1U7YWgFs6VpBaawRGmuMGUtiFNbUFKxZg7EOycWaOx7riMmYomOxmk2MSTdmMrpsShYAXp+fnTWtwRmSNgcWb2vz+Lq1o67VzcdbKvl4izGGYWhSJLNzjUBh2rBEomT9BSGEOG7Iv9FD6y7gd/s2qqqqDlJVCHE0HFHvQvNuSpp3815gO8ISQX5SPuOSxzEhdybjksYRZ48DjFEOYX4fYV2Ch1oKvl6G1dPMsMRIvFXleCqr8dQ04KlrwdPYjqfVFAwk/IdKXdIKT6vC06qhwgGbHcBO4MtgFbMdrHFhWJPjsKYNwj56DDHfvgRL8nBGDIpmxKBoLp2cCYDPrymubQkEDEYvw9byZlrauwZO+2ZIenn5biwmxaQh8ZySl8zs3CTGpsdiMkkKkhBCDFQSHITWfcDTge8fp6am5vdnY4QQPeupd6GytbJj7MIBehecXicrK1eysrIjJSg7JptxyeMYnzye8cnjGR43HHNkUnB/eU0cAEPnzsUKdFmRwO8HVyO01qBbavDXlOLZU4KnbC+eyio81fV46prxNLjwNHvxtR36JdznAl9lO67KKthUBQs24P7sSZLznRCbBUm5MPZSGHcZZpOZ4SnRDE+J5qKJ+5qkKappYVFBDYsLa1lZXIfL09EL6vVrVhXXs6q4nn98soP4CCuzco1AYXZuEmmx4b35n0IIIUQ/k+AghLTWDsABoJTymEyHnj5RCNH/lFKkRaWRFpXG2UPPBqDd187Wuq1sqN7AxtqNbKjeQHVbdbdjS5pLKGku4b2dRv9Cl96FlAm0+lqJNEd2Ow4AkwkiEiAiAZU8AvNQME81spp64m9rw7u7AM+ubXh2bsOzZxee8nKjJ6KhDU+L7jF1ybE3nOSxLdC42/gUfQZLHoBTfw2jLjCWYw42SZGXGk1eajQ3zs7B5fGxpqSBxYU1LCqsZVtFc5dzNzg9vL+hPLjuQl5qFLMDwcLJQxMJt4V2ZWohhBBHRoIDIYQ4DGHmsD7rXUixpPDpkk+79i6Yev/SbAoPxzZyPLaR43vcr12teHd+jafwazw7t1H+7Oegob3Ris+tMNs6TXlauwPeuBrSJ8Fpv4Vh83o8p91qZlZuErNyk/gFUN3sYklRLYsLa1lcWENti7tL/YKqFgqqWnhuSTE2i4mp2QmBXoVkRqVFyyxIQghxjJHgQAghvoED9S5sq9vWZSrVnnoXqr3VvLfzvQP2LnQeu3BEbbRHYh0zE+uYmQDUL/9OcFE356n/JnrcYNj6Lix7FNwO46DydfDPC2HoHDj3PkjOO+g1UmLsXDwpk4snZeL3a7ZVNgcDhdXFDbh9HSlIbq+fJUW1LCmq5a/zt5McHcbs4UnMzkti1vBkkqNlUTYhhOhvEhwIIUQfCTOHMSFlAhNSJgDdexc21mxkS+0WfHSdEain3oUhMUOCPQtH0rvQWcRJJ3UEB+s3En36WcaK1FNugiX3w6pnwNduVC5eZAQJt68B2+FNXWoyqeCsSD84ZRhOt5eVxfUsLqhlUWENRdUtXerXONr5z/oy/rO+DIDRaTHMzkvilNxkJmfHE2aRFCQhhDjaJDgQQogQ6al34dMvP6W0vRRzlvmgvQu7m3ezu3l3j70L45PHMy55HPH2+G7HHUzElJOof/FFAJxrOq3MFpkIZ/0Zpt0Ki/4O6/4J2gfNZVC4AMZc+I3uP8JmYd6IFOaNSAGgvLGNJYW1fFVYw9KiWhqdni71t1Y0s7Wimae+2oXdamJaTiJpykN+khmttaQgCSHEUSDBgRBCHEVWZSXHnsPcMXOBnnsXttZvPayxC73tXQifNCn43bV5C/7WVkyRnQZHx2bABQ9BVCp8dY9Rtvmtbxwc7C89LpzLpmRx2ZQsfH7N5rImY2BzQS3r9jTg9XeMgXB5/CwMLMT2OvDIpi+MtRXykpk5LIn4SFuftEkIIURXEhwIIUQ/OpKxC73tXbDExxOWm0t7YSH4fBTMnkP4mDHYx+UTnj+O8HH5WNLSUGMv7QgOChaAqxnsMX1632aTYnxWHOOz4rj91FwcLg8rdtUHpkytoaTO2aV+RZOLN9bs5Y01e1EKxmXEMicvmdm5yUwcHIfVLLPBCSFEX5DgQAghjjE9jV2oclbxdc3XxlSqR9C7EDtjhhEcANrpxLl6Nc7Vq4P1zUlJhOfnE+4aht22l/AEN+btH8KEK0N6z9F2K2eMTuWM0akA7Klzsriohv8s28bWOh9tnW5Va9iwt4kNe5t45IsiosIsTB+WyJzALEjZSQeYKlYIIcQhSXAghBDHOKUUgyIHcXbk2ZydfYDehZoNVDsP3buQkhjGLVOSGV7YSlijs1t9X20tLV9+iTF0OBEA68K/ET5zHeHj8rHn52MfNQqT/UCrL/SNwYkRfC9xCBltxfj8mrhh4/mqwJgFaUNpI50ykGhp9/Lp1io+3WqsQj84ISI4XeqM4YnE2K0HuIoQQoj9SXAghBAD0P69CwCVrZWH7F2oDmvnD6e3w+kQ7zAzutLKzKYU8ipNxOyqhtbuAYOnwY3ngw9o/uADo8BiwZ6X1yU
dyZaTgzKHZnYhs0kxeUgCk4ckcOcZeTQ5PSzdWRscr1DW2Nal/p56J6+u3MOrK/dgNikmZsUZC7HlJTE+Mw6zSQY2CyHEgUhwIIQQx4ne9i40RCuWRntZirGasdKaCW3pnN46hNFVVuJXLsVd6wX/fi/TXi+urVtxbd1K47/+DYApIgL72LGB3oXA+IVBg0Iyw1BshJVz89M4Nz8NrTW7altZXFDD4sJalu+qw+numCrW59es2d3Amt0NPPBZATF2C7MCvQpz8pLJiAvv8/YJIcRAJsGBEEIcpw7Wu7Cuah3Ly5dT0lwS3KeVYn1ENesjqiEZLGMUJ7X4OH1vO+MTLiG6xopr0ybcJSXdruV3OnGuWoVz1apgmTk5ifCx+R0BQ/5YzLGxfXqPSimGJUcxLDmK62YOxe31s3Z3A4sLjWBhU1lTl/rNLi8fbarko02VAOQkRzInN5k5eUmcPDSRyDD5z6IQ4sQm/xYUQogTyP69C6WOUpaVLWNJ+RJWVazC6e1IK/IqWBFtZ8UoO/AZSdlJzPj2DObE3sSEuijM24tp27SJto0b8NXUdruWryYwfuHLL4NltiFDsI8bZwx6HpdP2KhRmML6bmVkm8XE9GGJTB+WyP+eDXUt7Swpqg2u2lzV3N6l/q6aVnbVtPLishKsZsXkIfHMzk3mlLxkRqfFYJIUJCHECUaCAyGEOIFlRWdx+cjLuXzk5Xh8Hr6u+ZolZUtYWraUHQ07utStbavlvZ3v8R7voVDkZ+Uzc9pMZqTdwEhfCu7NW3Bt2kTbxk24Nm/G39ra7Xru3btx795N8/vvGwUWC/YRI7qOXxg6tM/GLyRGhfHtCRl8e0IGWmsKqlpYXFjDVwU1rCqup93rD9b1+DQrdtWzYlc9//hkBwmRNmYNTwpMmZpEakxoB2ELIcSxQIIDIYQQAFjNVqYMmsKUQVP4yeSfUPPmtSzb/SlLw+0si02iydcx8Fej2Vi7kY21G3liwxPE2GKYnj6dmd+aycxbrmZwWCLu4mLaNm6ibdNGXJs249qxAzxdV0XG68W1ZQuuLVtofP1fQNfxC2EmE57s7D5ZIVkpxYhB0YwYFM2Ns3NweXysLqlncWEtiwpq2F7p6FK/vtXNexvKeW+DMSZjRGo0c/KM8QpThyZgt4ZmALYQQvQnCQ6EEEL0KDkshm+3tPLtllZ8U3/FluwpLC1fytKypWyq3YRfd/zq3uxu5pOST/ik5BMAcuNzmZU+i5nTZzLx2+dhM9vwt7fTvn17R8Cw8dDjF+ICZYUPPBDsWbDn5xM+9sjHL9itZmMWo9xkfnnuKKqbXcH0o8WFtdS1urvU31HlYEeVg2cWF2OzmDh5aAJzArMgjUiNDsngayGEONokOAghpVQ0EB3YtPr9/oNVF0KIY4s1IvjV7G1nXPI4xiWP45bxt9DU3sTyiuUsK1vG0rKl3VZwLmwopLChkBe2vEC4JZypg6YyM2Mms4bOImv8VcF6vqYm2jZvDqYjtW3ciK/2AOMXvviCli++CJbZsrO7pCOFjRx5ROMXUmLsXDI5k0smZ+L3a7ZWNAd7Fdbsrsfj61hcwe31BwKJWvgIUqLDAjMgJTFzeBJJUX03jkIIIY4mCQ5C6y7gd/s2qqqq+rEpQgjRS9ZO03x6uq5/EBsWy9nZxsBmrTWFjYUsLTN6FdZWr+2yvkKbt42v9n7FV3u/AoxxDjPTZzIrYxZTBk0hauZMombOBIzVoL2VlYFxC5uoWLQIS8luTO1dBxIDuEtKcJeU0PxeYPyC1Yp9xAijdyEwS5ItJwdlMvX61k0mxdiMWMZmxHLL3GE43V5W7qrnq4IaFhfWsLOm63iKakc7b6/by9vr9gIwNiMm0CuRxElDErBZet8GIYToDxIchNZ9wNOB7x+npqbm92djhBCiVzr1HOBpO2A1pRR58Xnkxedx/djrcXqcrK5cbQxsLl9KqaO0S/1SRyn/2vEv/rXjX1hNVialTmJm+kxmZswkNy4Xa1oa1rQ0Ys46k62TJ4Pfz/TBg7ukI7l27ABv1wXe8Hhwbd6Ma/Nm4HUATJGRndZfyCd83Dgsqam9TgGKsFmYNzKFeSNTAChrbGNJYBG2JUW1NLV1HUuxuayZzWXNPLFwJxE2M9NyEpmdawxuzkmKlBQkIcQxS4KDENJaOwAHgFLKY/oGv14JIUS/6dJzcODgYH8R1ghOyTqFU7JOAWBP857gWIVVlato83acy+P3sLJiJSsrVnL/2vtJCU9hZsZMZmTMYHradKOSyUTY8OGEDR9O3MUXARjjF7ZtCwQMm3Bt3Ih79+5ubfG3tuJcuRLnypXBMktycpfpVO1jx2KOienNkyEjLpzLpwzm8imD8fk1m8qaWBToVVi3pxGfvyMFyen28cX2ar7YXh08dl+gMHNYErER1l5dWwghQkmCAyGEED3rHBx4Dz842N/gmMEMjhnMlSOvxO1zs656XXBthcKGwi51q9uqeafoHd4pegeTMjHENoRR9lEk1iQyOnE0ZpMxQ5ApLIzwCRMInzAheKyvsZG2zVtwbdpI26bNBxy/4K2poeXzz2n5/PNgmW3oUOz5Y7/R+AWzSTEhK44JWXHccVouzS4Py3fWBQc2767rmpJV1tjGv1aX8q/VpZgUjMuMY05eMnNyk5iQFYfFLD8kCSH6jwQHQgghenaYaUW9YTPbmJY2jWlp07iTO6lqrWJZ+TKWlC1hecVyHO6O6UT92k9xezHF7cV89NFHxIbFMiNthtGzkD6D5IjkLuc2x8URNWsmUbN6GL+waWPH+gvOri/rAO7iYtzFxT2PX+i8/sJh9ADH2K2cNWYQZ40ZBMDuulYWFdayuKCGZTvraGnvSIfya/i6tJGvSxt5+PNCosMsTB+WGAgWkhmcGHGgywghREhIcCCEEKJnlk6Lfnm6v1D3hdTIVC7KvYiLci/C6/eyuXYzS8uXsqxsGZtqN6HpSM9pam9ifsl85pfMB2BE/AhjBqSMWUxInoDV3DU9RynVZfwCgPb5cO/a1XX8QkHB4Y1fiIoyxi/k5xuzJI0bhzU19ZD3OCQxkqsTI7l62hA8Pj9flzayuKCGRYW1bNzbSKcMJBztXhZsrWLBVmMCi5zkSC6emMFFkzLJiAs/wBWEEKLvSHAghBCiZyHoOTgYi8nChJQJTEiZwG0TbqPR1chznz/H1rat7PLvorata4rQjoYd7GjYwfObnyfCEsHUtKnG2goZM8mMzuzxGspsJiw3l7DcXOIuuRgAv8uFa9u2jtWdN23qefxCSwvOFStwrljR0eaUlI7pVPPHHnL8gtVsYkp2AlOyE7jzzBE0Ot0sLTJSkBYV1FDe5OpSf1dNK/cuKOC+TwuYOSyJSydnctaYQYTbZAE2IURoSHAghBCiZ53HHLiaQGs4irPsxNnjmBw5mcmRkznllFMoaCgIzoC0vmo9Xt3xa7/T62Rh6UIWli4EIDsmmxnpRgrSlEFTCLcc+Fd3k91OxMSJREycGCzrMn5h3/oLdXXdjvVWV9Py2ee0fNZp/MLwYST+zw3EXnThIWclio
uwcd64NM4bl4bWmp01rcGxCst31tHm8QHGo19SZMyMFBVm4fxxaVw6OZPJQ+Jl5iMhRJ+S4EAIIUTPOvcclK6Ex6bCpGth/JUQmXhUm6KUYkTCCEYkjOCG/Bto9bSyqmIVS8uXsqRsCWUtZV3qlzSXUNJcwmvbX8NmsjI5Lo+ZMcOZGZnFMGVHuVvA1QztgU/wuwNczZjbHUS1O4iKSIDTTkZfdw1eey5t5S5j0baNm2jbsgXd0/iFop1U/PKXOBYsYNAf/4A1JeWw73F4ShTDU6K4fuZQ2tw+Fmyt5K21e1lSVIsOpB+1tHuDA5qzEyO4dHKmpB0JIfqMBAdCCCF6ljISwmKMl2aA2gJY8Cv4/A8w8nyYfC1kz4G+nqbZ5w2+tEc5dmH2tcGOti4v85GuZua1O5jX3ox2xbDb52Wpr5Glqp3VFo2r06/pbr+H5fVbWF6/hXuBVK+XWW0uZjrbONnlIqZz0v/+mlqhqRS1+S2sgDUslpjBJ8O0aeiMm2l3xeHatiMwnWrX8QstCxey64JvMei3vyHm3HN7/Qt/uM3Mtydk8O0JGZQ3tvHO+jLeWruX4tqOBdhK6pxd0o5GR3iZnCopR0KIb06CAyGEED0Li4abF8KyR2DTW7BvJiGfG7b8x/jED4VJ18CE70FUijFwucuv8E1dfpGn3bHfL/XNXcvbHV0GP5+078vXB26mArIDn+8B7QrWhtlZGmFnWbidIputS/0qi4W3o6N4OzoKs9aMa29nZpuLWU4Xo9xuDhrqtDdB4QIoXIAC7GYb9vSJxM2cBlfeiD8xn+qnXqbhn/8EwN/URPldd+P49DMG/e63WOLjD/fpd5EeF85t84Zz69xhrNvTwFtr9/L+horgzEfBtCPAboYLGzdK2pEQ4huR4EAIIcSBJQ6DCx6EM//PCAbWvgRlazr2NxQbPQlf/AlQoH391dKgMA0zXC5meIF2G5V2C0vDw1hq1azAhQN/sK5PKdbb7ay323k0HuKtUUxPmsCstGlMz5pDUmsD7FkBe5Ybf1uru17M5zZSrkpXwtKHMAGDkkcS/f3JVPynCE9NEwCOjz/GuXo1aX/8A9GnnfaN700pxeQhCUweksBvzx/TY9qRy0cw7WhoUiSXTMrg4kmZpEvakRDiMEhwIIQQ4tDCoowegknXQOVmWPcSbPi38Us6gPYf/PheUUY6kz2GFq8JnzmC2NQsoywsGuyBv2Gxnb7HdC0PiwaL0WMwCLgk8PH6vWyq3WQMbC5bypa6LV2u3OBp4aOKJXxUsQTW3cuohFHMzJjJzFN+xPjk57A2lnYNFuoKu7Wemu1Esp2hcxTV62No3BUJgK+ujr233U7st79N6q9+2etVmffXU9rRy4sLqHJ2pEkV13bMdjRruDHb0ZmjZbYjIcSBSXAghBCidwaNhXP/Aaf/Aba+awQKe5Yb+yz2/V7i932P7eHlPqZTnU4v97ao4KxIaxYuBGDu3Ll90nSLycLElIlMTJnIDyf+kHpXPcvLl7O0bClLy5dS76rvUn9b/Ta21W/j2U3PEmmNZFraNGakz2DWab8kPSodWmu7BgsVX4PfSPUxWzVpU5uIznRRsSoOr8t4IW96911av/yYtB99l6hLvm88myO0L+1oNKUUNfrZpZO7pR0tLqxlcWEt0WEWzh9vzHY0abCkHQkhupLgQAghxDdji4AJVxoftxNMluCv9QNFgj2B83LO47yc8/BrP9vrtwcDha+rv8bXKU2q1dPK53s+5/M9xrSlQ2OHMjN9JjMzZnLSab/BbrEbz6F8XUewULqKqPRmcs6ppnJdLM27jRmgvM3tlP7pBQYvuZ/IqZNg+OmQewakjj2i6WKVUuTGm7lp7rgDph052r28vqqU11cZaUeXTs7kookZknYkhAAkOBBCCNEXbBGHrnOMMykToxNHMzpxNDeNuwmH28GqilUsKTdSkCpaK7rUL24qpripmFe2vUKYOYyTUk8yUpAyZjJ0yN3GL/J+H1RvxbxnBRmTlhL9xSIql1nwtRu9CC0VViJ3L4XdS42xG7GDYeqNxpSx4XFHdD+HM9tRcW0r//hkB/cu2CFpR0IIQIIDIYQQokfRtmhOG3Iapw05Da01xc3FRq9C2VLWVK2h3dcerNvua2dpudHjwGpIi0xjZsZMZqXP4uS0k4kalA9TbyLmEh++Z+6n8oHnAfC59psbqWkPfPpbWHgPTLsF5v0STEf+or7/bEdvrtnLBxsl7UgI0Z0EB0IIIcQhKKXIic0hJzaHq0dfjcvrYm3V2uCKzcVNxV3qV7RW8FbBW7xV8BYWZWFc8jhmZcxiZsZMMod3rMTszTgVLjwTij6Fos+MqV8BPK2w+F4YOhty5vbpfeyb7eh3F4zhky1G2tHSnZJ2JIQwSHAghBBC9JLdYg+mEAGUtZSxtGwpy8qXsaJiBa2ejtQdr/ayrnod66rX8fD6h5lcE83PAvvaG5s6xm14XLDpTfjq70YPAkB9cZ8GB52F28xcODGDCydmUNbYxjvr9vLW2r2U1HWsMyFpR0KceCQ4EEIIIY5QRlQGl424jMtGXIbH72FD9QYjzahsKdvqt3Wpu9fcHPxesXcbD35+GxcMu4B5WfMIm3S1sRL1soeNCm0NR6f9ceHcfmout80bztrdxiJrknYkxIlJggMhhBCiD1lNVk4adBInDTqJH036EbVttSwvX86SsiUsL19Oc2THdKnRTli0dxGL9i4i2hrNmdln8i3lZiLGys+01R/oMiGhlOKk7AROypa0IyFOVBIcCCGEECGUFJ7EBcMu4IJhF+DXfrbVbsX/8BWYvD7sHrB5NG6rwuFx8Hbh27wNZGamcUGLkwsc5WT1U7sl7UiIE5MEB0IIIcRRYlImxiSPpTAxCW9VFQAPfjmYJamNrEpqpngQ+MyKvVYrT8TH8kTLaibOv4YLhl3AWdlnEWM7slWVvylJOxLixCHBgRBCCHGUWQcNCgYHSWuLuRC4EPBazewapNma4WdHpqIgQ7G+ej3rq9fzt5V/Y97geXxr2LeYnj4dq8l61NstaUdCHP8kOBBCCCGOssRbfkDFz3+Br7GxS7nF4yOvFPJKAYy37fIE2JGhKMh0saXqYxYUf0x8eCLnDj2Xbw37FiMTRvbLr/PfNO3orDGDsFsl7UiIY5UEBwehlPol8GfgMa317f3dHiGEEMeH6LlziVq6hPaiItrWr6dt/Xqc69bjKS3tVje9HtLrNfM2GcFCix0K0msoyHyZP2S8jB41nLNHX8h5Oecd7dsI6n3aUXog7ShO0o6EOMZIcHAASqlpwE3Axv5uixBCiOOPMpuxjxiBfcQI4q+4AgBvTQ3OtWtoe/Jm2mpsuBqsaH/Xl+coF0zapZm0ywgW/KqAkpS/82rmvTiGppOQN4Mp7ilE2iKP/j0ddtrRHl5ftYecpEgumZzJxZMySIuVtCMhjgUSHPRAKRULvArcAPy2n5sjhBDiBGFJTibm7HOIWesDTy1+H7jOfY+2rTtp+9roXfDV1XU5xqQhpwpyqvywdi/wBhseeZPmvHRSp51C3
pwLiBg1GmWzHdV7OZy0o12SdiTEMWfABQdKqUuBU4AJwHggGnhVa33VQY7JBP4InA0kAhXAf4E/aK17WmHmaeAtrfUXSikJDoQQQhxdEQnQ1IrJDBGfX0ZExmS4cDr6h7/Do9Jp21qEc/16WteuxV20E7XvZ/mAeIcmfm0ZrH2NPY+9hs9qxjJmJAlTphM+cRLhEydgiY8/arcjaUdCDBwDLjgAfo0RFLQAe4GRB6uslBoGLANSgHeB7cBU4EfA2UqpmVrruk71bwKGA1eHpPVCCCHEoaSMhqbA+AOvC3Yvhd1LUYBNmbCljiF26nS49Hp8Cfm07aqmeuUiihfOJ353LeHursGC2eNDf72Fuq+3BMts2dmET5xI+MQJREyahC0nB2UyhfS2JO1IiGPfQAwOfoIRFBRh9CB8eYj6j2MEBndorR/ZV6iUuj9wrj8DPwiUjQD+AszWWrv7vulCCCHEYTj/fvjqHiheDA3FXfdpP1RuMj6rnsYMRMUNISpnBp60WTRG5WFNTmDTl2/j+vprcva4SW3sfgl3SQnukhKa3nkHAFNMDOETxhMxcaLRu5A/FlNk6MYtSNqREMemARccaK2DwcChuhqVUjnAmUAJ8Nh+u38H3AxcrZS6S2vdCkwHkoDNnc5tBuYopX4ARGqt2/vgNoQQQogDi82EbwV+z2qugNIVsGcF7F4GVZuNAKGzxt3QuJsR+7bDE8gfOh3PKdezKCKCf5aspXbNcoaXehmxV5NTCZb9TuFvbqZ10WJaFy02CgIDpo3ehYlETJqIJS0tJGk+knYkxLFD6f3yFAcSpdRcjJ6DHsccKKVuBJ4BntZaf7+H/Z9gBA+na60/V0rFAZn7VXsBKMToUdiiD/HAlFJrD7BrZG5ubsTTTz990HsKBYfDAUB0dPRRv/ZAJM+rd+R59Y48r96R59Wd2eskpnkHsU1biW3aSkxzAWb/wTu7fSYbpbG5fBiTyKeWVva4q8mphLy9mpF7NXllmljnQU9hnCcuDk9ODp5hObhzhuEdnAXm0PyK3+7TrK3ysbTMw9Y6Pz39x3dQhGJWhoUZGRYS7L1PiZL/f/WOPK/e6c/ndfPNN1NYWLhOaz25t8cOuJ6DXtr3I0rBAfYXYgQHecDnWutGoLFzBaVUK1Cvtd4cojYKIYQQh81niaAhYSINCRMBUH4P0Y5dxDZtJbJuEwktO7D5WrocY/a7yW7Ywm0NcBuwy2rj7fh05meYeX+aB7QmtQFGlGnjs1eTWQP7v26bGxsxr1uHfd06ALTViic7G09ODu5hOXhyctBRUX1yn2FmxYx0CzPSLdS1+Vla7mVpmZcqZ0eYUOnUvFXo4e1CD2MSzczKsDAp1YzNLL0JQnxTx3twEBv423SA/fvK4/rqggeK0JRSa6OjoyfNnTu3ry512BYuXAhAf1x7IJLn1TvyvHpHnlfvyPPqnYULF4L2M3dshpGCtGcF7FlupB11kuNx89PqEu4C1tjDeC8qkk/jo1iUoFiUb9SJcGlyyzWjy82cXBtLWokD1dY1s1Z5PNgKC7EVFrJvdIJt6NCOgc4TJ/bZQOdLAK11z2lHwOY6H5vrfETbLZw/Lp3rZ2aTl3rwX2zl/1+9I8+rd/rzeR1Jb8XxHhwcyr6fFg6YKqS1nnt0miKEEEL0AWWC5BHG56TrjbKmsk7jFpYb4xbQmICprnamutr5ZV0DX0SE835UJCvC7Tjtig05ig05mtdpxOTXjG9O5lvOPMZWWrBsLsJTVtbt8u7iYtzFxTT95z8AmGJjOwY6T5hI+Lh8TBER3+zWDme2I1fHbEdnjk7ltnnDGZ8V942uJ8SJ6HgPDvb1DMQeYH/MfvWEEEKI409sBsReAmMvMbZdTVC6yuhV2LMC9q4hwtfO+a1Ozm91UmU281FUBO9FRVIUWDzNb1Ksj2tgfdxKSIdRp43gkoRLmNWQgmVLEc7163Bt3QYeT5dL+5uaaP1qEa1fLTIKzGbsI0d26V2wpqf3+pYOZ7ajBVurWLC1ilnDk7h13jCm5yTKAGYhDuF4Dw52BP7mHWB/buDvgcYkHBGlVDTGIm0AVr/ff7DqQgghxNFhj4XcM4wPgLcdyr8OBAvLSd2zguubGrmuycF2m5X3oiL5KCqS+k6Dj7c17OD/GnZgRjFrZB4XXHg5c7LORG8vom39epzrv6Zt3Tp8DfutNerz4dqyBdeWLTS88goAlkGDgoFC+MSJ2EeORFmth307nWc7Wl3SwLOLd7Fga1Vw/5KiWpYU1TJxcBy3zR3OaaNSJEgQ4gCO9+Bg37SnZyqlTFp3zP0WeHGfCbQBK0J0/bswpkwFoKqq6iBVhRBCiH5iCYPBJxsffgx+P9TuQO1exqg9Kxi1ZwV37tnD8nA770ZFsjAiArfJeLn2ofmqaQdfrfwj0Sv+yFlhg/jWyNOYcOGPISEHz549RqCwfj1t69fRXljU7fLeykoc8z/GMf9jAJTdTnh+frB3IXzC4a3orJRi6tAEpg5NYEelgycWFvHehnL8gZSj9XsaufHlNYwcFM2t84YTpTUmCRKE6OK4Dg601juVUgswZiS6DXik0+4/AJHAU4E1DkLhPmDf3KUfp6am5ofoOkIIIUTfMZkgZZTxmXIDANamvczZs4I5u5fRtGc5C9r28H5UBOvt9uBhDgVvuSt5a+erZG1/kQva4fyEcWRlzyXuBxdC6m/wtbTStnGj0buwbj1tGzeinV3nUdUuF87Vq3GuXh0ssw0dSvikiUTNnkP0WWce8pf/EYOiefCKidx5xgieXLSTt9bsxe0zfiPcXungjtfXkxqhOHeoleleH2EWWVhNCBiAwYFS6kLgwsDmoMDf6UqpFwPfa7XWd3c65FZgGfCwUuo0YBtwMjAPI53oV6Fqq9baATgC7faYQrwsvRBCCBEysZmQfynkX0os8J22Br5TuorSXZ/yfsUy3vM3UGbpeK0otVp53AqPu7cxaeN6Llj2f5zZrojJPImowTOIOn8a/OAGtCmM9oICnOvX07ZuPW3r1+MpL+92+eBA57f/Q/xVVzHo14f3n+/BiRH85aJ8fnRaLs8u3sWrK/fgdPsAqHJqXtjiZv7fF3LTnByunJpFhG3AvRoJ0acG4j8BE4Br9yvLCXwAdgPB4CDQe3AS8EfgbOBcoAJ4GPiD1ro+1A0WQgghjjvh8ZB3Fll5Z3ErcIu7jfXb3uS9ne+ywFGEg45xduvsdtbZ7fzVr5nn2MS3Vq5g+kIXVpMFlTYe++Dp2CdNgwt/DpFJeKqqaFv/NW3r1+Fc/zWurVvB6w2er+GVVwifMIHY88877Oamxtj51XmjuXXucF5cVsKLy0poajMGT1c2u/jTB1t59ItC/mfmUK6ZkU1s+OGPeRDieDLgggOt9e+B3/fymFLg+lC0RwghhBCgbOFMGn8Nk8Zfw8+9Lhbu+ZL3t7/O0pqv8QVmDHebFJ9ERfJJVCQJPh/ntrTy7ZqNjChbi1r+qHGixFysg6dhzZ5FzKwbICoFv8uFa/Nmap95JjjrUcVvf4t95AjChg/vVTvjI2385Iw8
bpqTw59e+5KPSzw0tRvta3B6uO/TAp5atIurpw/hf2YOJTk6rO8ekhADwIALDgYSma1ICCHEichusXN2zjmcnXMOtW21zC+ez/s732db/bZgnXqzmVdiY3glNoZct5tvOVo5t9VJSl0h1BXC+n8aFdPGYxp+BhG5Z5Dx93souewK3Lt3o51O9v7oxwx949+YIiMP0JIDiwqzcM5QK6cNtlAVmcNTX+1kb0MbAC3tXp5YuJPnlxRzxZQsbpqTQ2b8N1ubQYiBRpLgQ+suoCzwyZfZioQQQpxoksKTuHr01bxxwRu8/a23uX7M9SSHJ3epU2izcV9iPGdkpfOD1GQ+jIygbd+A44oNsPheeP4szE+MI+P8aFRgXIB7506qH3roiNpnMyuunjaEL++ey/2XjWd4SlRwX7vXz0vLdzP3Hwu5+80NFFW3HNG1hBgIJDgIrfuAjMBnU2pqaj83RwghhOg/efF53HnSnXx66ac8dfpTnJdzHnZzx2xHfqVYGhHOz1OSmDtkML9JSmS1Paxj9IKrCXvdAgZNqAke0/z2v9HLn4Kt78GeleBxfaO2Wc0mLp6UyYIfz+HJqyaTn9GxfqrXr3lr7V7OeOArbn11LZvLZO1UcfyStKIQktmKhBBCiO7MJjMzMmYwI2MGLSe38Nmez3hv53usruyYutSpNP+NjuS/0ZGkmcI4v6WNC+orGerxEju0jZpNMXjbzPha3Thf/jWRqW7jwJgMuGWpMWD6GzCZFGePHcRZY1JZUlTLo18UsbLYmLtEa/hoUyUfbarklLxkbps3nKlDE474eQhxLJHgQAghhBD9JsoWxYXDL+TC4RdS3lLOB7s+4P2d71PSXBKsU+Fv55kIE89EpDMuLJkLXD6mD96Nd0c4AI694R3BQXMZbPsAJl19RO1SSjE7N5nZucmsKann8YU7+WJ7dXD/VwU1fFVQw5TseG6bN5xT8pJl1WVxXJCfsoUQQghxTEiPSufmcTfz3oXv8eq5r3L5iMuJscV0qbOxvYY/q3p+P76j3OXPhLTxHZVKV/Zpu07KTuD566bw4R2zOH9cGp1jgNUlDVz3wmrOf2QJH22qwLdvOWYhBigJDkJIKRWtlEpXSqUjsxUJIYQQh0Upxbjkcfx62q/58rIveXDug5yadSoWU0fCg8PW8d/ULZ5W3hh3Lp59BaWrQtKuMemxPPrdSXx+5ylcflIWVnNHlLClvJlbX13HGQ98xZtrSvH45L/5YmCS4CC0ZLYiIYQQ4gjYzDZOG3IaD536EF985wt+efIvyU/Kx2vuqKPdbv5U8CrnZqXzRnQU7tod4AzdGqc5yVHcc+k4vvrpPK6fmY3d2vE6taumlZ++tZG5/1jIS8tKcHl8IWuHEKEgwUFoyWxFQgghRB+Jt8dz5cgree2817j3zIeD5dbA4smVFgt/SkrgvKx03ljzEG6fO6TtSY8L53cXjGHJz07ltnnDiA7r6Nkoa2zjd+9tYdY9X/D4wiIcLs9BziTEsUOCgxDSWju01uVa63JAZisSQggh+khWwtDg9xRLPAn2jlmDKi0W/lT8H879z7n8e/u/Qx4kJEWF8dOzRrL0F6fy07NGkBhpC+6rbXHz9493MONvX3Dfgh3Ut4a2LUIcKXlbFUIIIcSAY7J1vIBHKTvzL57P3dFjSPB1pPFUOav4v5X/d9SChBi7ldvmDWfJz07l9xeMJj22Yw0Hh8vLI18UMfNvX/DH97dS0dQW0rYI8U1JcCCEEEKIAUeFhQW/6/Z2IqwRXBs7mvml5dxd10CCqePFvHOQ8K/t/wp5kBBuM3PdzKEs/Ok8/n7JOIYmRQb3tXl8PL+0mDl//5Kfv72RktrWkLZFiN6S4EAIIYQQA47q1HOg3YGXfZOVCK25ttnBx+kXcPdJd3dJN6pyVvHnlX8OBgkeHdpxADaLicumZPHZnafw6HcnMiqtY/pVj0/zr9WlnHrfQu54fT3bKppD2hYhDpcEB0IIIYQYcLoEB+3txhdzx4DgcO3n2jHX8vElHx8wSPhj2R9Z5FhEu689pG01mxTnj0vnoztm8cJ1U5g8pGP1Zr+G9zaUc85Di7nxpdWs29MQ0rYIcSgSHISQrHMghBBChEaX4MDjwbl6NVp3mt/UZ/QKhFvCg0HCT0/6KYn2xGCVRl8jb9a/ybn/OZfXt78e8iBBKcW8kSm89YPp/PvmaczJS+6y/7Nt1Vz8+DKufHoFSwpr0VoWVBNHn+XQVcQRuAv43b4NWedACCGE6BvKZELZbMGUot1XX4PJbiUyKZ7IQe1E5TZj7VQ/3BLONWOu4TsjvsObO97k+c3PU+eqA6DaWc1fVv6FZzc9y435N3Jx7sWEmcN6uGoftV0pTs5J5OScRDbubeTxL3fy8ZbK4P7lu+pYvquO8Zmx3DpvOGeMSsVkUgc5oxB9R3oOQkvWORBCCCFCJOrUU7ts+10eHHvDqVwTR9GfPmPneedT9de/0bJkKf5A6tG+IGH+JfO5OP5iok3RweP3BQnnvn0ur217LeQ9CQDjMuN48urJfPqTOVw8KQNzpyBgw94mvv/PtZz14CLeWb8Xr6y6LI4CCQ5CSNY5EEIIIUIn4757yXz0EeIuvxxLelq3/e6dO6l/6SVKb7yRgpOnsefmm6l/+Z+0FxdjN9uZFzOP32f8nv+d8r9d0o2q26r566q/HtUgITc1mvsvm8DCu+dy9bQh2Cwd7wyF1S385N8bmHffQl5duVtWXRYhJWlFQgghhBiQlNlM9OmnE3366WitcX/4EK2v/p2WijCcteFob0fOvna5aF20mNZFiwGwZmQQPSwH9+gxnDb1Ri7Nu5S3Ct7i+c3PU9tWC3QECc9teo7/yf8fLs27NKTpRgBZCRH86cKx/PC04Ty3pJhXlu+m1W0EA6X1bfzqnc089FkhN83O4bsnDyYyTF7lRN+Sn7KFEEIIMeAppQjLSiNhRCuD59aT96uTyXrmaeKvuRrb0KHd6nvKyohYtJi4J5+kYPp0qv/nB5yz1MV/Rz/Iz076X5LCk4J1q9uq+duqv3Hu2+fy6rZXj0pPQkq0nV+cM4plPz+NO8/IIy6iYwRFtaOdP3+0jZn3fMGDnxXQ6JRVl0XfkXBTCCGEEMcHc8cLtKl4AVEjTyfqF7+AXyrce/fSumQJLYuX4Fy+HL/T2XGcx4Nz1Sqcq1YBcHJyMqfMnM7GHDOP25ayR9UDHUHCc5ue44b8G7gk9xLsFjuhFBth5Y7Tcrlh1lBeX7WHpxftotphBCeNTg8PflbIM4t2ce2MbO44LRe71XyIMwpxcBIcCCGEEOL4kD4BLHbwusDdAu/eCgXz4fyHsGVmYrviCuKvuALtduNc/zXbXnsV29atWEv3djmNt6aGlv++Rw5wr8lEy/A0vspoYmlWGzvToKat5qgHCZFhFm6cncPV04fw9toynvxqJ3vqjQCn1e3j8YU7+aqghse+O4nsTisyC9FbEhyEkFIqGtg3DYKscyCEEEKEUkIOXPcR/OcmqN9plG17H0pXwbcfg9wzAGONhMiTp9LS5oSLLmLm6NG
0Ll1G6+LFtC5diq+pqeOcfj9RBWWcVwDnAS3hig3Z8HWOYkOO0ZPw7KZnuWHsDVyad2nIg4Qwi5nvnjyYy07K5MNNFTz2ZREFVS0AbClv5vxHlnDPJeM4b1z3AdpCHA4JDkJL1jkQQgghjqbMyfCDxbDg17DmeaOspQpevRSm3Ahn/AlsEV0OsaakEHfRhcRddCHa58O1eTMti5fQungxbRs3QqfFyKLaNDO3wcxtRllJCnydU8X72//Ki8Of5boJNx6VIMFiNvHtCRlcMC6df67YzZ8/3Ibb56el3cttr61jZfEQfnXeKMIskmYkekcGJIeWrHMghBBCHG22SDj/AfjuGxCZ0lG++ll4ajaUrT3gocpsJnz8eJJvv43sf/+L3GVLybj/PmIvughzclK3+tnVcOEKze9f8/P3v1Xh/9lf+MsvTuGNhY/i8rpCcXddmEyKa2dk8/YtM8hKCA+Wv7x8N5c+sZw9dc6DHC1EdxIchJCscyCEEEL0o7yz4NblMPL8jrK6InjuTPjq7yj/odcLsMTHE3PuuaT/9S/kLlrE0P++Q/JddxJx8slgtXapG+6GKYWaK99vJv8Hj7F87kksvOsa6hd+jt8V2kAhPzOWD344m7PHDAqWbSpr4rxHFvPx5oqQXlscXyStSAghhBDHr8gkuPwV+PpVmP8zY6Cy3wtf/pkJMSPYPvInh30qpRT2kSOxjxxJ0k034WtpxblqJS2LF9OyaBHesvIu9QfV+uDD1VR9uJoKm4XIKVOJmTOHyNmzsQ0dilLqAFf6ZmLDrTxx1SReXFbCXz7ahsencbi8/OCVdVw3I5tfnjuqy+JqQvREggMhhBBCHN+UgolXwZCZ8M4PoHQFALHNOzhpzY8htR0mXWvU6wVzVCTRp55K9KmnGouwlZTQtGghuz75D+Ebi7B5O+qa3F7ali6jbeky+OvfsKanEzlrFpGzZxE5fTrmqKg+ulXF9TOHMnFwPLe/to69DW0AvLishPV7Gnj0u5PISog4xFnEiUzCRyGEEEKcGBKGwvUfwam/AZPx+6jZ74L3fwSvXwktNd/41EopwoYOJeXa65n22vvkrVzBzt99jy9mRLI3sXt9T3k5jW+8QdkP76Bg2nR2X3U1tU89jWvrVnQfzG44ISuOD384mzNGd4x33LC3ifMeXswnWyqP+Pzi+CXBgRBCCCFOHCYzzLkbbvyc1ojMjvKC+fD4NNgxv08uEx4Zy/lX/pobn1lK7XO/5Td3pfDU2SZWjlA4w/ar7PXiXLOGmgceoPjiSyiccwrlP/s5TR98iLeh4Ru3ITbCytNXT+bX543CYjJ6RZpdXr7/z7X86YOtuL0yxbroTtKKhBBCCHHiSZ/A2sn3k7PrJTLLPjTKnLXw+hVGitFZf4GwI0/1CTOHceXIK7k492LeKXyHZzY9Q52jitxymLDLz4Rdmpz9fsj31dbS9O67NL37LiiFPT+fqEAKUnh+Pspy+K9vSilunJ3DpCHx/PC19ZQ1GmlGzy0pZu3uBh797kQy4yXNSHSQngMhhBBCnJD85jCKcm+Gq/4D0Z0WDVv3Ejw5y1g8rY+EmcO4YuQVzL94Pj+f8WvqR6Tyr1PM/Px6CzfdYeaR802syg/DEx3e9UCtcW3cSO3jj7P7yu9SMHMWe3/yExrf/g+equrDvv6kwfF8eMcsTh/VMbXr16WNnPfwEj7bKuswiQ7ScyCEEEKIE9vw0+CWZfDBT2Drf42yhmJ4/ixIygMUKJMxYFmp/bZNh9juWt+mTFyhTFxsSeM/9hiecZdSHdnO4nzF4nwfyu9lQm0kV9cMYVhRO+3bi6HTGAR/UxOO+R/jmP8xAIk33kDK3Xcf1m3GRdh45pqTeHZxMfd8vB2vX9PU5uHGl9dw85wcfnrWCKxm+d34RCfBQQgppaKB6MCm1d8HA4yEEEIIEQIRCfCdF2HjG/DR3dDeDNoPNdtDcjkbcAVwMfBOdBTPxMVQZbGgTYr1Ke2sTykgYaSPm6Y0c1aRF0+5jdZKO962rise173wIkl33IHJZjus6yqluGnOvjSjdZQ3GesvPL1oF2tK6nn0u5NIjws/xFnE8UzCw9C6CygLfPKrqqTbTgghhDhmKQXjL4dblsLQOUflkjbgckcLH5WW8+vaelK9HfOf1pvN3JMez6UzE1hwhp/0b1cz9OxqUiY0oczaqOTz4d+5stfXnTwkng/vmM28EcnBsnV7Gjn34cV8uf3w05XE8Ud6DkLrPuDpwPePU1NT8/uzMUIIIYQ4DHGD4dr3obHUWDRN+0Fr4y+607beb3v//Qeo7/eBpxXcrdDeAu5WbG4Hl7tbucjVzH9dpTzjraQSYwXnerOZ+xLjeSEuhutjmrksvgVLYSSeVuM1zv/R72HkZ71epyE+0sZz107h6cW7+McnO/D5NY1OD9e/uJofnDKMu87MkzSjE5AEByGktXYADgCllMdkkn/AhBBCiAEjLuuoX9IGXAZc6HPz36L/8symZ6hsNaYzCgYJqRncF+0mstVICfLv+doYKzHmol5fz2RS/OCUYUwOzGZU2Wyc88mvdrKmpJ5HvjuRtFhJMzqRyNuqEEIIIcQxxma2cdmIy/jwog/5zbTfMChyUHBfvddJqcUV3PZ4TLD88SO63pTsBD68Yxan5HWkGa3Z3cB5Dy9h4Q5JMzqRSHAghBBCCHGMOlCQ0GbrSCF6whZPpaP0iK+VGBXGC9dN4adnjSCwZhr1rW6ue2E1f/94Oz6/PuJriGOfBAdCCCGEEMe4/YOEluSOhcvmLTTz43YTC0sXHvF1TCbFbfOG8/pN00iJ7ljK+fGFO7lntYsGl8y8eLyT4EAIIYQQYoDYFyRc9Yc3cccYYwGiXPCjNzR/efN27ll1D26f+4ivc3JOIh/9aDazc5OCZQUNfn67rI1FBTVHfH5x7JLgQAghhBBigInIHsqIF19B24xUnzgn/OZfPuav+CdXfXQVu5t3H/E1kqLCeOn6qdx1Rl4wzcjhhmtfWMV9C3ZImtFxSoIDIYQQQogByD56NNnnWlFmI9UnqdkIEMpLt3LZ+5fx/s73j/gaJpPih6fl8sqNJxMbZkQIWsMjXxTxvWdXUN3sOsQZxEAjwYEQQgghxAAVkR1H5qwGlMn4FT+9Hn71Lx/K0covl/ySXy35FU6P84ivM2NYEn+cEc7oxI5XxxW76jn34cUsLao94vOLY4cEB0IIIYQQA9WgcUSltZM+vQGUESBkV8PP3/QR5ta8t/M9Lv/gcrbXbz/iS8WGKe4+yc5PTs8LrrdW2+LmqudW8sCnBZJmdJyQ4EAIIYQQYqA6+68w4XvEZLlIm9oYLB5RBj9924/FqylpLuF7H36P17e/jtZH9gJvUoofnZ7LqzecTFKUMZuR1vDQ54Vc/dxKqh2SZjTQSXAghBBCCDFQ2WPgwsfhqreJm5BE6qSm4K5xJZo739WY/Bq3381fVv6FH3/5Y5ramw5ywsMzY3gSH/1oFtNzEoNly3bWcd7DS1i2U9KMBjIJDkJIKRWtlEpXSqUDVr9f5gYWQgghRAgMPx
1uXU7C975L0tjmYPFJBX5+9iGoQI/BF6VfcOn7l7K+ev0RXzIl2s4rN57MHaflBtOMahztXPXsSh7+vFDSjAYoCQ5C6y6gLPDJr6qq6ufmCCGEEOK4FRYN591L0r1vkDDBEiyeuNnHXz90G/k/QGVrJdd/fD1Pb3wan993RJc0mxR3npHHy/8zlcRIGwB+Dfd/WsC1z6+itqX9iM4vjj4JDkLrPiAj8NmUmpraz80RQgghxPFOZc8k5eWVxM3ICZblbDLz3PwWYrTxE79P+3hk/SN8/9PvU+M88kXNZucm89GPZjN1aEKwbElRLec+tJgVu+qO+Pzi6JHgIIS01g6tdbnWuhzwmEzyuIUQQggResoWwaBn3iNm3rRgWfSGcF79pJ5Jro4VlFdWruTS9y9l8d7FR3zN1Bg7r914MrfPGx4sq3a0891nVvDoF4X4Jc1oQJC3VSGEEEKI45Aym0l/6Cki58wOlrWtj+b+xc18v6Fp38yn1LvqufXzW7lvzX14fJ4juqbFbOLus0bw0v9MJaFTmtG9Cwq47sXV1Ema0TFPggMhhBBCiOOUstnIfPhhIk46KVhWvSaWqze082xlFcnejjEHL255kWvmX0Opo/SIr3tKXjIf3jGLKdnxwbJFBTWc+/BiVhXXH/H5ReiEPDhQSiUrpS5USl2klBoU6usJIYQQQogOJrudzCefwD52bKBEUb4inlG74K2yCmY524J1N9dt5rL3L+Pj4o+P+LppseG8ftM0bpk7LFhW1WykGS0plOlOj1VHHBwopSYopf6olJrQw74bgN3A28BbwG6l1I+O9JpCCCGEEOLwmaOiyHrmaWzDAy/qWlG2LImwCguPVdVwd10DlsBsRi2eFn666Kf8ftnvafO2HeSsh2Yxm/jZ2SN54bopxEVYAfD6Nbe8spbtlc2HOFr0h77oObgK+AVQ0blQKTUOeBKwA0uBjwEfcL9SakYfXFcIIYQQQhwmS3w8g597HmtmJgDaq9m7PJ12RzzXNjv4Z3kVmZ6OMQdvF77NlR9cSWFD4RFfe97IFD744SwGxdgBcLR7uf6F1VQ2yYrKx5q+CA5mAmu11vtP4n87YAb+pLWeo7U+DzgzsO8HfXBdIYQQQgjRC9bUFAa/8DyWlBQA/C43pYtTccXPY6zbzZtllZzT0hqsv7NpJ1d+eCVv7HgDrY9stqHM+AheuH4KUWHGGgwVTS6uf3E1DteRDYIWfasvgoMsYHsP5acDLuBv+wq01kuAL4DpfXBdIYQQQgjRS7asLAY//xzmuDgAfM3N7HmzDvesfxAVnsg9NXX8oaYOu98PQLuvnT+t+BMv1L6A0+88omuPSovhiasmYTEZ6y1sq2jmttfW4/H5j+i8ou/0RXCQhLECcJBSKgHIBlZqrfdPVtuCsSiYEEIIIYToB2HDh5P1zDOYIiMB8NXWsuevb+D5zoeo/Mu4uKWVf5VXMtzdsSbCeud67im/hw01G47o2rNzk/nLxfnB7UUFNfz6nc1H3DMh+kZfBAceIGG/somBv+t6qN8KyP/6QgghhBD9KDx/LFlPPoEKCwPAU1bGntvvxjvvHvjuGwwLT+X18ioua3YEj6n31XPd/Ot4fvPz+PU3/7X/spOyuOO03OD2v9eU8tiXRd/8ZkSf6YvgoAgjhaizMzECgBU91E8DKvvgukIIIYQQ4ghETJlC5sMPgcUYB+DeuZPSG2+iaYeblgkPolMu4ud7mrl/bw3RgTURvNrLA2sf4JbPbqG27ZtPSfqT03O5eFJHMsm9Cwp4Z/3eI7shccQsfXCO94FfK6WeBh4F8jAGHLuAT3qoPw0o7oPrCiGEEEKIIxR1yilk/OPvlN15F2iNa+tWyn/6v51qpJIJPGf20xzupy5S4QhXOMIX88Y/z2TaqDPJHpyPJT4ec+dPXBymQK9ET5RS/O3icVQ2uVi2sw6A/31rI6kxdmYMSwrtTYsD6ovg4D7gauCGwAdAAX/RWneZwFYpNRoYCbzSB9cVQgghhBB9IOacc/C1tFD5m98euJJPEdMCMS3QkSHeBqvepYp3ezxERURgiYvDHB+PJW0QiTfcQMTEicH9NouJJ66azHeeXEZBVQsen+b7/1zL27fMIC81us/uTxy+Iw4OtNZNgXUL/oQxC1Ed8G+t9WM9VD8X2AB8eKTXFUIIIYQQfSf+O98hLDub5vkf422ox9fYiK+hEV9DA776erSn91OOaqcTj9OJp7wctmyhfUcBwz9d0KVObLiVF66fykWPLaXa0Y7DZayB8M6tM0gJrIsgjp6+6DlAa10B3HgY9e4F7u2LawohhBBCiL4VMWUKEVOmdCvXWrPok09QLa1MGZ6Db8lzNK77L+9bIqjzWohpg6g2iG8zMUylYG/14GtoBK+3y3k8paX4mpowx8Z2Kc+IC+f566Zw+VPLaXX7KGts439eWs2/b55OZFifvK6Kw9SnT1splQhorXV9X55XCCGEEEL0H6UU2m5H2+2ET5gIEx4lquoWvv/ubTzfupNH42PxKRWoXcPlTg93eaOwWTPxEUPpa8W4q41Zj9x79hCen9/tGmMzYnn0e5O48aU1+PyazWXN/PD19Tx99WQs5r6YQ0ccjj550kqpbyulioBqoEYpVaCU+lZfnHsgU0pFK6XSlVLpgNXvlwU+hBBCCHGcSB2D6cbPufHk/+XF6gbSPR29BP+OsHJVmIPddauwVX6MzVod3Nf41tsHXNNg3ogU/u/CscHtL7ZX87v3tsgaCEfREQcHSqlpwNtADsZAZAUMB94O7DuR3YWxQFwZkF9VVdXPzRFCCCGE6EMmM8z4IRNuWMSbkeM5w9URIBSE2bgifRDvREUSOag9WN74739T9de/HvCF/8qpg7l17rDg9qsr9/DUol2huwfRRV/0HNwZOM8fgVRgEPB/gDmw70R2H8Zq0BnAptTU1H5ujhBCCCFECCQOI+bKf3HfzVv5zdRfYjNZAWgzmfhtciJ/nxVHeFZbsHrDy/+k+m9/O2CAcPeZI/j2hPTg9t/mb+e9DeWhvQcB9E1wMB1YrLX+vda6RmtdrbX+LbAYmNEH5x+wtNYOrXW51roc8JhMki8nhBBCiOOXUorLRl3J6+f/i5zYnGD5hxEWbr/Ujh7S0YNQ/9LLBwwQTCbF3y8dx8lDE4Jld7+xgVXFMqw11PribTWFnldCXgkk98H5hRBCCCHEAJIXn8fr573OxbkXB8t2261cc1k47dnuYJkRINzTY4AQZjHz9NUnMTwlCgC3z89NL6+hqLol9DdwAuuL4MAKtPZQ3kofz4YkhBBCCCEGhghrBH+Y8QfumX0PkdZIANotJm65NAzGpwXr1b/0EtX3/L3HACE2wsoL100hKcpYabmpzcP1L66ixtHera7oG5LnIoQQQgghQubcnHN58/w3STdHANBiNfPD0yqwntSRdlT/4osHDBCyEiJ4/rqTCLeaASitb+PGl1bjdHu71RVHrq+Cg+uUUl90/gDXAuxfHvh83kfXFUIIIYQQx7ismCweO/tFogOvnlVhFn42ayf2k8cE69S/+
CLVf/9HjwHCuMw4Hv3uREyBpRQ27G3ijte/xueXKU77Wl8FB9nA3P0+2RjTmu5fvu8jhBBCCCFOEMOTRvHgvIewBN7nC8Kt/PXkTUTOnBysU//CC1T/494eA4TTRqXyh291BBOfbavij+/LGgh9rS/GBMzrg3MIIYQQQojj3NTBc/njlJ/zyzV/A2B5pI2nJ63h++bptCwy5repf/55AFJ+ejcquOqy4erp2extaAuue/DS8t1kJURw4+wcRN844uBAa/1VXzRECCGEEEIc/y4Y8z3KHXt4dMdrALwbZSVj/GouMM3GsXAxEAgQFKTc3T1A+NnZI9nb2MaHGysA+PNH28iIC+ec/DTEkevz2YSUUlOBOUAmYAdqgLXAx1prV19fTwghhBBCDCw3n/xzyh17+U/5IgAej1KkTVjPZHUKLV8avzvXP/c8SimS77qrS4BgMinu+854qptdrC5pQGv48b+/JiUmjMlDEnq8njh8fTZbkVJqilJqHbAcuAe4A7gZ+CXwNlCqlLq+h+NkulMhhBBCiBOIUopfn/YgM2LzgmV/sLexd0oRUad2ZKzXPfscNfff321cgd1qrIGQkxSYItXr58aX1lBc29Ps+qI3+iQ4UEqdh7Ei8gSgAngdI0D4e+B7BZAIPKuU+kun484EftwXbRBCCCGEEAOH1WTlvnNfIs9urJnrVYo7VQXOWfVEnXpqsF7dM8/2GCDER9p44fopJEbaAGhwerj+hVXUtcgaCEfiiIMDpVQqRgDgB24Bhmitr9Ja/0Jr/XOt9VXAYOD7gAv4mVJqnlLqW8C7QMSRtkEIIYQQQgw8UbYoHjv/NVL2rYFgMnFb2was59iIOu20YD0jQHigW4AwJDGSZ689CbvVeKUtqXNy48trcHl8R+8mjjN90XPwEyAK+J7W+imtdbf/NbTWfq31M8D3MKY3/SfwJlAPvNMHbRBCCCGEEAPQoMhBPH7OS0QqY5GzKouF2ys+Iv6yofv1IDxDzQMPdgsQJg6O56ErJrJvWML6PY385N9f45c1EL6RvggOzgZWaq0P+ZKvtf4vsBJIB7YBJ2utN/VBG4QQQgghxAA1InEk95/6CObA9o4wG3dvfYLUG2cSNa/TGISnn+4xQDhrzCB+e/7o4Pb8zZX85aNtR6Ppx52+CA6GAst6UX8ZoIGZWuu9fXB9IYQQQggxwM3InM3vTv51cHtpRDh/Wf4bMn58SfcA4cGHugUI188cyg2zhga3n11SzItLi0Pf8ONMXwQHVsDdi/oeoF1rLcPJhRBCCCFE0EUjL+fmUdcEt9+OiuD5Bd8n4xc3dg0QnnqKmoe6Bwi/OncUZ48ZFNz+wwdbWbClMvQNP470RXBQAeT3ov6YwDFCCCGEEEJ0cfuUuzk/syMQeCgmnPnvfo+MP9xJ1Ny5wfK6J7sHCCaT4sErJjBxcBwAWsMd/1pPQZXjaDV/wOuL4GARcIZSauShKiqlRgFnAbKqshBCCCGE6EYpxR/n3seUhI4xBL+JNrP27cvJ+NvviDrllGB53ZNPUfPww10CBLvVzLPXnMSQRGMGJJfHz72f7Dh6NzDA9UVw8ChGatEHSqnRB6oUCAzeB8zAY31wXSGEEEIIcRyymq08cObT5EQYKUIepfhRmIuSt68k4/57ugYITzxJ7SOPdAkQEqPCePx7k4LbC7ZWsbms6ejdwAB2xMGB1not8A8gB1inlHpNKXWDUupMpdQZge+vA+sDdf4ROEYIIYQQQogexYbF8sQ5L5FkiQLAYTZxq66k/t0byXjw/i4BQu3jT3QLEMakx3LO2I7xBw9+VnD0Gj+A9ckKyVrrnwG/D5zvCuBpYD7wceD75YF9v9da/7wvrimEEEIIIY5v6VHpPHr2s4QrCwBlVgu3t2zA9enPyHj4ISJPmROsawQIj3Y5/sen5wXXP/hsWzUb9zYeraYPWH0SHABorf8I5AJ/Ar4EtgM7gIWBstxAHSGEEEIIIQ7LmMQx/GPeg8GX1i1hYfxs70fo5Q+Q+fDD+wUIj1PTKUAYMSiac/PTgtsPflZ4tJo9YPVZcACgtd6ttf6d1vp0rfUYrfVorfVpgbLdfXktIYQQQghxYjgl6xR+OfVXwe2FkRHcs/FJ1OZ/GQHCnNnBfbWPPdYlQPjxabnB3oMvtlezfk/DUWv3QNSnwcHxQil1m1Jqo1KqOfBZrpQ6r7/bJYQQQghxorp81BVc32kNhNdjo3l50W8w7V5I5iOPEDl7vwDhUWP+m9zUaC4Ylx7cJ70HByfBQc/2Aj8DJgEnAV8A/1VKjevXVgkhhBBCnMB+POUuzso6Lbh9b0IsC96/CVPNJjIf3S9AePTRYIBwx2m5mAK9B18V1LB2t/QeHIgEBz3QWr+rtZ6vtS7SWhdorX8FOIDp/d02IYQQQogTlUmZ+PMp9zAxcUyw7BcJMXz9xuWYHKU9BgiOL79keEoU356QESyXmYsObEAGB0qpS5VSjyilFgfSfrRS6pVDHJOplHpeKVWulGpXSpUopR5USsUf4jizUuoKIApY1pf3IYQQQggheifMHMbDpz/JkEhjoLHbpPhhXBi7X7sIk7vBCBBmzgzWb3jtdQB+eOrwYO/B4sJa1pTUH/W2DwQDMjgAfg3cDkwAyg5VWSk1DFgLXA+sAh4AdgE/ApYrpRJ7OCZfKdUCtANPAhdprTf11Q0IIYQQQohvJs4exxNnPkeCNRqARrOZWyM8NLx6MSbdzqA//CFYt3XJEtp37iQnOYoLJ3b0HjwgvQc9GqjBwU+APCAGuOUw6j8OpAB3aK0v1Fr/XGt9KkaQMAL4cw/H7MAIPqYBTwAvKaXG9kHbhRBCCCHEEcqKyeLhM54gLLAGwh6rlR+qWlz//i62QckdMxhpTc2DDwJwx6m5mAPdB0uL6li5q64/mn5MG5DBgdb6S611oe68DN4BKKVygDOBEuCx/Xb/DmgFrlZKRe53DXdgzMEarfUvgK8xghIhhBBCCHEMGJ88nntOuZdAthAb7GH8snUb/n9fTfItNwbrOT79DOf69WQnRXKx9B4clDqM9+tjmlJqLsaia69qra/qYf+NwDPA01rr7/ew/xOM4OF0rfXnB7nOF0B5T9fYr97aA+wamZubG/H0008f7PCQcDgcAERHRx/1aw9E8rx6R55X78jz6h15Xr0jz6t35Hn1zrH8vL5s/pL/NPwnuH1NUzO3tMeyc90IbOs3A+AePpyGu+6kuk3zi8Vt+AKvwD+bYmdUornP29Sfz+vmm2+msLBwndZ6cm+PHZA9B700IvD3QKHhvslu8/YVKKX+ppSarZTKDow9+CswF3g1dM0UQgghhBDfxLyYeZwSdUpw++XYGN4z1TF8yEq0yXjdtRUVYdu0mZQIEzMzLMG67xS5Geg/lvcly6GrDHixgb9NB9i/rzyuU9kg4JXA3yZgI3CO1vqTQ13sQBGaUmptdHT0pLlz5x5Gk/vWwoULAeiPaw9E8rx6R55X78jz6h15Xr0jz6t35Hn1zrH+vGb7Z3Pnwjv5ovQLAO5JjCfNV8uoXCcNO+wADPr0
U4bedivDxrUz796FeP2aggY/tqx8Zg5P6tP29OfzOpLeihOh5+BQ9qWpBUNGrfV1WushWuswrXWK1vr0wwkMhBBCCCFE/zCbzPxtzt/IT8oHwK8U/5ucSO24VpTdBkB7YSFN771PVkIE3zkpK3js/Z8WSO9BwIkQHOzrGYg9wP6Y/eoJIYQQQogBKNwSziOnPkJmVCYALpOJ24YmEXHeycE6NQ8/jL+9ndtPHY7VbPxGvHZ3A4sLa/ulzceaEyE42BH4m3eA/bmBv30+XF0pFa2USldKpQNWv9/f15cQQgghhBCdJIYn8vjpjxMbyJ6vN5t5e5Ibc0ICAN6KChpefY2MuHAun9LRe/DAZ9J7ACdGcPBl4O+ZSqku96uUigZmAm3AihBc+y6MRdrKgPyqqqoQXEIIIYQQQnQ2NHYov48aHdx+tfVrwm68Orhd+9RT+JqbuW3ecGxm4/Vw/Z5GFhbUHPW2HmuO++BAa70TWABkA7ftt/sPQCTwsta6NQSXvw/ICHw2paamhuASQgghhBBif6dFDmF0ezsA7drHK3lVWLOMngJ/UxN1zz5HWmw4V07t6D14UMYeDMzgQCl1oVLqRaXUi8DPA8XT95Uppe7d75BbgWrgYaXUf5VSfw2sW/ATjHSiX4WinVprh9a6XGtdDnhMpgH5uIUQQgghBhxli+CHDR1DSt8q/i+W73f0HtS//DKeqipunTccm8V4R9uwt4kvd1Qf9bYeSwbq2+oE4NrA56xAWU6nsks7Vw70HpwEvAicjJHuMwx4GJiutZa1s4UQQgghjifWcGa2uZjkcgHg9Xt5LnUHYaNHAaBdLmoffYzUGDvfnTo4eNgDnxae0L0HAzI40Fr/XmutDvLJ7uGYUq319VrrNK21LTBV6Y+01vX9cAtCCCGEECKUrJEo4PZOvQfvFb+P/sFVwe3Gt9+mfdcubp07jLBA78GmsiY+23bi9h4MyOBgoJDZioQQQggh+ok1HIAprnamWeIA8Gs/T4WtIHLGdKOO30/NAw+SEmPnqmlDgoc+cAKPPZDgILRktiIhhBBCiP5gjQh+/aElLfh9fsl8nDdeEtx2fPopbV9/zQ9OGYbdarwab61o5pMtJ+Z7mwQHoSWzFQkhhBBC9AdbR3Awzmdibubc4PYTrgXEnHtOcLvq3ntJirJxzfTsYNmDnxXg9594vQcSHISQzFYkhBBCCNFPAmlFAHic3D7x9uDmF6Vf0HDNOWAxFkprW7OWlq++4uY5OYRbzQBsr3TwyZbKo9rkY4G8rQohhBBCiONPp7QiPG2MSBjBWdlnBYserXmb+MsuC27X3Hc/ieEWrpnRMfbgwc8KT7jeAwkOhBBCCCHE8adLz0EbALdOuBWTMl5/l5Yvpew7M1ERRhDRXlhI0/vv8/05w4i0Gb0HO6ocfLS54ui2u59JcBBCMluREEIIIUQ/sUZ2fPe0ApATm8P5OecHix8ueYmE668Lbtc8/DBxFs21M7KDZQ99VojvBOo9kOAgtGS2IiGEEEKI/tBDzwHALeNvwaKMsQbrqtdRdPZozAkJAHjLK2h47XVump1DVJhRp7C6hQ82lh+9dvczCQ5CS2YrEkIIIYToD/sNSN4nMzqTi3MvDm4/suNZEm/5QXC77sknifG5uH5mdrDs4c9PnFWTJTgIIZmtSAghhBCin9g6pxW1ddl107ibsJlsAGyu28zGGYOwZmYC4Gtqou7Z57hxVkfvwc6aVrZVOI5Ou/uZvK0KIYQQQojjj9kGgcHH+Nzg8wZ3DYocxOUjLw9uP7rlSZJ+fEdwu/7ll4lwNDB3RHKwbElRTejbfAyQ4EAIIYQQQhx/lNpvOlNnl903jL2BcIuRelTQUMCKUWbCRo8CQLtc1D72GHNyO4KDxYW1oW/zMUCCAyGEEEIIcXzab62DzhLDE7lq1FXB7cc2PkHinT8Obje+/TbTrR2pRCuL63F5fCFr6rFCggMhhBBCCHF8OsCg5H2uHXMt0dZoAEqaS/hyUD0R06cZO30+1HNPMjwlCgC318+q4vqQN7m/SXAQQrLOgRBCCCFEPzpIWhFAbFgs1465Nrj95IYnSfjxj4Lbjk8/5dvWjoBgceHxP+5AgoPQknUOhBBCCCH6ywHWOujsqtFXER8WD0BZSxkfhe0g5txzgvtnL/w3BKYxPRHGHUhwEFqyzoEQQgghRH/pMp1p954DgEhrJDfk3xDcfnrj08Tc/gOwGNOYhm3ZwLSaHQBsr3RQ3ewKXXuPARIchJCscyCEEEII0Y8Oo+cA4PIRl5McbsxMVN1WzX/alhN/2WXB/d8v+DjYe7Ck6PjuPZC3VSGEEEIIcXzqHBy4Ww9YzW6xc/O4m4Pbz216jsibrkOFG8cPqt1LRqsRFBzvqUUSHAghhBBCiOOT9cCrJO/v4tyLSY9MB6ChvYHXa+YTOXVqcP+ouhLACA50oBfheCTBgRBCCCGEOD517jlw1kJbA7Q1gqsJXM3Q3mL0KLid2Px+ftBp7MGLm19A5Y8Kbk9s3gNAbUs72ys71j843lj6uwFCCCGEEEKEROfg4NPfGp+DuAB4LjON3VYrDk8LC3c/wGSMc0wIBAdgTGk6Ki0mFC3ud9JzEEKyzoEQQgghRD+KHtSr6hbg1oam4PZTwyNAGSlECTVlRLmNGY+O53EHEhyElqxzIIQQQgjRX8ZfCcNOhfB4sMeBPRbCYsEWDbYoY0yCJRwsdjCHgcnK2W1uhrs9ADTazTQmdfy4O7JhNwAri+txeXz9cUchJ2lFoXUf8HTg+8epqan5/dkYIYQQQogTSmQSXP1Orw4xAbfv+Zwff/ljAFYONnNWYGHkma4K1jAKt9fPquJ65uQl9217jwHScxBCss6BEEIIIcTAc2rWqYxJHAPA1syO97eJjo5xB8fregfytiqEEEIIIUQnSil+OPGHABRkqmB58t4iTH4jnWhRQU2/tC3UJDgQQgghhBBiPzPSZzDeZ6IuRlEbbZSZXC5yWyoB2F7poLrZ1Y8tDA0JDoQQQgghhNiPUoozVBQAOzr1Hpypq4Pfj8fUIgkOhBBCCCGE6MEMWwoABRkdwcEkR2nw+/E4pakEB0IIIYQQQvRgeHgqKV5vl56D1NKC4PfFhbVorfujaSEjwYEQQgghhBA9UBHxzGhzsTsF2gMLAKiqSnJoBaC2pZ3tlY5+bGHfk+BACCGEEEKIntjjmNnmwmdWFKV3FF9gqQt+X1x4fM1aJMFBCCmlopVS6UqpdMDq9/sPeYwQQgghhDhGhMczrc2F0rpLatFJx/G4AwkOQusuoCzwya+qqurn5gghhBBCiMMWHkec388Yt5sdGZ3HHRQGv68srsfl8fVH60JCgoPQug/ICHw2paam9nNzhBBCCCHEYbPHATDD6aIwvSM48BVsZ1SCDQC318/qkvr+aF1ISHAQQlprh9a6XGtdDnhMJnncQgghhBADRngcADPbXLREKPYmBsq9Xs4LawxWO55Si+RtVQghhBBCiJ6ExwOQ395OlO663sGU1r3B74sKjp9ByRIcCCGEEEII0ZNAWpEVOLl
9v/UOdhdgNRvb2ysdVDe7+qGBfU+CAyGEEEIIIXoSSCsCmNHq6BIcuDd8zeTBHfuXFB0fqUUSHAghhBBCCNETsxWskQDMcDqpSACH3djla2zkzFhPsOqS42TcgaW/GyC68/v91NfX43A4aG9vP+JluSMiIgDYtm1bXzTvuCfPq3fkefVOfz8vpRRhYWFER0eTkJCATJQghBCHEB4PnlYyvT4GR2VQkFHK5J3Gu5kx7iAOgEWFtWitUUod+FwDgPxX4Rjj9/spLS2lpqYGl8t1xIEBGC8j+15IxKHJ8+odeV6909/PS2uNy+WipqaG0tJSZHFGIYQ4hM6pRQljKOiUWpRYsoNImxmA2pZ2Gpye/Y8ecKTn4BhTX1+P0+nEYrEwaNAgIiMjj/iXPYfDAUB0dHRfNPG4J8/r/9u78/ioqvv/468TSAKBsMeAgEIJa2qDWFkENCRK3UAQ6xeplqCQqghiaW3dEBQqavkK1oWaAAER+QWRVcAvJKhIwQUEUVARCiIiSMKSsCdzfn/MTMgy2SeZGXg/H495THLvPeeeexgm93PPVj6qr/LxdX05HA5OnDjBzz//zMmTJ8nMzKRJkyY+KYuISEBwDUoG6Bn+K15rfn7X6S1f0DQ2jl2/nADgUNZpGtUJqeYCepdaDvyM+8ahadOmhIeHq8lfRLwqKCiI8PBwmjZtCpz/zhERkWLkazm4OjSCPS2CyXU1HpzZ+T2XBZ9fHfnQ8TPVXDjv052nnzlzxvmhqlOnjo9LIiIXMvd3jPs7R0REipEvOAg7d4ro5l34b+T53dHHfsj7+eAFMJ2pggM/4x5joBYDEalK7gFz3hjXJCJyQcvXrYhTR7nm0msKjDtoc2h33s+HsgL/gYvuQKuQMSbcGHOpMeZSIFgD/0TEXwT6bBoiItUmX8sBp47Q89KefJtvpeSm+77L+/kXBQdSirHAftfrioMHD/q4OCIiIiJSLvlbDk4fpX2j9hxq0zBvU4NvvuTXh3eBtepWJKWaAjR3vbZFRkaWcriIiIiI+JXa5wMBTh0lyATRsWNvDtc7v/nFj1/nwS8XqVuRlMxam2Wt/cla+xNwTuMIJL/x48djjOGDDz7wdVFERESkOIW6FQFcc+k1fPGrgt0zr/txC4ey1HIgUmWMMQX6RQ8ZMgRjDK+//nqpaW+44QaMMSxevBiAlJSUvPyKe+WXnp7O2LFjiY+Pp1GjRhhj6NWrV5nKvXTpUm666SYiIiIIDQ2lZcuW9O/fn40bN5b94kVERMQ/1MrXcnD6KAA9Lu3BvNggFvQ8f/9Q79xJjmZmBfxED1oETQJGYmIib7/9NklJSTzwwAPFHrdnzx7S0tJo1qwZt956a4F9MTExDBgwoNRzJSUl8d5771GrVi2ioqI4cuRIqWkcDgf3338/SUlJtGzZkttvv53GjRtz8OBBNm7cyKZNm+jevXup+YiIiIgfyd9ycPwAfPc+TcIa0zLyVyy49r9c91UOlxxz7q6XlcnxUznUDwv2SVG9QcGBBIzY2FjatWvHF198webNm+nSpYvH42bMmIG1lmHDhlGzZsGPeOfOnRk/fnyp5xozZgzPP/88HTp0YN++fbRu3brUNFOmTCEpKYl77rmH5ORkQkIKrpB47lzgL6kuIiJy0ck/5uDEIZh3JwDXNKzPNw3qc7geecFBxKmjHMo6HdDBgboVSUAZMWIE4Hyy70lubi6zZs3CGMPw4cMrfJ5u3boRHR1NjRo1ynT88ePHeeaZZ2jRogVJSUlFAgOA4OCyfVH88MMPREdHExISwty5c8tVbhEREfGyWg2gweVFNvc85RxfkBF+vmuRMzgI7EHJCg4koAwdOpSQkBDmzZvHyZMni+xfuXIl+/fv5/rrry/T035vWbp0KdnZ2QwePBiHw8E777zD5MmTefXVV9m6dWuZ89m6dSs9evRg3759rFixgrvvvrsKSy0iIiKlCgqCP7wDPR6CK34PbeLh0ivpXCuS2g4HGflmLYo4dTTgpzNVtyIJKBEREQwYMIDU1FRSU1NJSEgosN/dopCYmOgx/ZYtWzx2KxowYACdO3eucLk+++wzwNk60LFjR/bu3Vtg/6BBg5gzZw5hYWHF5rFmzRoGDRpEnTp1WLduHTExMRUuj4iIiHhRRDv43aQCm0KAqxfdTka9bwHnIOQmp44FfMuBgoMA0+rv7/m6CGW2Z/ItVZJvYmIiqampJCcnFwgODhw4wIoVK4iMjOS2227zmHbr1q0en+S3atWqUsHBoUOHAHjhhRe48sorSU1NpVOnTmzfvp2RI0eycOFC6tatS0pKisf0c+fO5d577yUqKopVq1Zx2WWXVbgsIiIiUj2uafxrVtf7Nu/3iFNH+e/xwA4O1K1IAk5cXBxt2rRh/fr17NixI2/7rFmzyMnJISEhodj+/UOHDsVaW+RVuAWivHJzcwGoXbs2y5Yto2vXrtStW5euXbuydOlS6taty5tvvsn+/fuLpJ02bRp//OMf6datG+vXr1dgICIiEiB6RnQpNObgSMCvdaDgQAJO/sHGycnJAFhrmTlzZqUHIldUw4bOmQy6d+9O06ZNC+xr1qwZ3bp1w+Fw8PnnnxdJ+9FHH2GtJT4+Pi8fERER8X+XN/gVwbVz836POHWUQwHecqBuRQGmIl11srKyAAgPD/d2cXxm2LBhjBs3jjlz5vDcc8+xbt06du3aRVxcHFFRUdVenvbt2wPQoEEDj/vdN/2nTp0qsm/GjBlMnjyZCRMmkJuby7PPPltl5RQRERHvMaHhNKt5jrM1gwnJgbCcsxzPKH1tJH+mlgMJSJGRkfTv35/Dhw+zePHivBaE4gYiV7X4+HgAvv76a4/73dtbtWpVZF+DBg1YvXo1vXv3ZuLEiTz66KNVVk4RERHxotBwmuXmcDjf81d76GBAr5Ks4EAClnvNgylTprBo0SKaNGnCwIEDfVKWmJgYevbsyY4dO/ICFbfk5GR27NhBmzZtuPrqqz2mDw8PZ9WqVcTHx/Piiy/y8MMPV0exRUREpDJCw2mWk0NGvfPjDsKPZ5B9JseHhaocdSuSgNW3b19at27Np59+CsBDDz3kcfGxitiwYQPz5s0DIDs7G4CdO3cWGLhceOahGTNm0KtXL0aMGMG7775LdHQ027dvZ8WKFYSFhZGSklLiomphYWEsX76cQYMG8fLLL3P69GmmT5+OMabYNCIiIuJDNUNplgt7C6x1ENjTmSo4kIBljOG+++7jySefBM63JHjD7t27mT17doFthw4dKrCtcHDQvn17Nm/ezIQJE1i5ciVr1qyhUaNG3HXXXTz11FN07Nix1PPWqlWLRYsWMXjwYN544w3OnDnDzJkzCQpSI5+IiIg/ujQohM35uhU1ORnYg5IVHIjfKkt/vSeeeIInnnii1OMSEhLKNV3pH/7wB+6///4yH+/WsmXLIt2KijN+/HiPC7KFhITw7rvvlvvcIiIiUv2aBdXhcD0H7oXQIk4d5VDWaer7tlgVpuCgChljwgF3LBnscDh8WRwRERER8bImIXU4Gp6V93vE6UwOHT
8TsMGB+ipUrbHAftfrioMHD/q4OCIiIiLiTTVCwiHs/APgQF8ITcFB1ZoCNHe9tkVGRvq4OCIiIiLiVaHhhIadn52oyanjHDqu4EA8sNZmWWt/stb+BJzToFIRERGRC0xoXZrUyOGka8LE0Nxcsg5l+LZMlaC7VRERERGRigoJp2lOLhn5pjPN+fln35WnkhQciIiIiIhUVGhdLs3JISP8/LpEQb8c8mGBKkfBgYiIiIhIRYXUpVlOLofzTU9U93gGZ3JKn5LdHyk4EBERERGpqNC6NMst2HIQceooR88oOBARERERubi4Ww7yjTmIOHVEwYGIiIiIyEUntB61rOVs3XxrHZzO5JiCAxERERGRi0xoXQBqhuXmbWqilgMRERERkYtQiDM4qFPrfHDQ+NQJjp7OLS6FX1NwICIiIiJSUa6WgwiTw/Hazk3BDgc5R477sFAVp+BAxEfGjx+PMYYPPvjA10URERGRigoJB+DSQguhBR3J9FGBKkfBgfgtYwzGnJ8WbMiQIRhjeP3110tNe8MNN2CMYfHixQCkpKTk5VfcK7/09HTGjh1LfHw8jRo1whhDr169ylTupUuXctNNNxEREUFoaCgtW7akf//+bNy4sewXXwkJCQkYY0hJSSn2GHdgMn78eI/bjTG89tprHtO66/LJJ58ssu/bb79lxIgRREVFUbt2berUqUPr1q3p27cvzzzzDAcPHgQgNzeXhg0bEhwcTFZWVpF8Dhw4kFeO4q7j2muvxRjDunXrir1OERGRKudqOWhWaCG04KNHfVSgyqnp6wKIlFViYiJvv/02SUlJPPDAA8Uet2fPHtLS0mjWrBm33nprgX0xMTEMGDCg1HMlJSXx3nvvUatWLaKiojhy5EipaRwOB/fffz9JSUm0bNmS22+/ncaNG3Pw4EE2btzIpk2b6N69e6n5+Ivx48dz9913U69evdIPxhlQ3XLLLZw+fZoePXpw4403EhYWxp49e9iyZQurV6/mmmuuITIykho1anDdddexZMkSPvroI2655ZYCeaWlpQHOADEtLY2EhIQC+0+ePMknn3xCnTp16Natm1euV0REpEJCnS0HzXJy2JbvT2btY6XfO/gjBQcSMGJjY2nXrh1ffPEFmzdvpkuXLh6PmzFjBtZahg0bRs2aBT/inTt3LvK03JMxY8bw/PPP06FDB/bt20fr1q1LTTNlyhSSkpK45557SE5OJiQkpMD+c+fOlZqHv4iKiuL777/n+eefZ9KkSWVK86c//YnTp0+TkpLC0KFDi+z/8ssvadiwYd7vcXFxLFmyJC+oyC89PZ3atWsTFxdHenp6kbw+/vhjzp49S1xcXJF6FhERqVbBdQBcax0YwDlLUb3sozhs4M1YpG5FElBGjBgBOJ/se5Kbm8usWbMwxjB8+PAKn6dbt25ER0dTo0aNMh1//PhxnnnmGVq0aEFSUpLHG9bg4OAy5fXDDz8QHR1NSEgIc+fOLVe5vWXUqFFceumlvPTSS/z444+lHv/LL7/w/fffU79+fY+BAcBvfvMbWrZsmfd7fHw8gMeb//T0dHr27Mnvfvc7fvrpJ7799tsi+/PnISIi4jNBQRBSlwYOB6dqnw8G6p07zpkAnLBIwYEElKFDhxISEsK8efM4efJkkf0rV65k//79XH/99WV62u8tS5cuJTs7m8GDB+NwOHjnnXeYPHkyr776Klu3bi1zPlu3bqVHjx7s27ePFStWcPfdd1dhqYsXFhbGs88+y6lTp3jiiSdKPb5evXrUrFmT7OxsDhw4UKZzREdHExkZydatW8nIyMjbvmvXLvbu3UtcXBx9+vQBznczcnP/ruBARET8QkhdDBAWdH4htBB7itM5gddyoG5FElAiIiIYMGAAqamppKamFumL7m5RSExM9Jh+y5YtHrsVDRgwgM6dO1e4XJ999hngbB3o2LEje/fuLbB/0KBBzJkzh7CwsGLzWLNmDYMGDaJOnTqsW7eOmJiYCpcHYPHixezZs8fjvrLMkJSQkMC0adOYO3cujzzySIn1Exoaym233cbChQvp1asXDzzwAL179+aKK64o8Zrj4uJ4++23Wbt2LXfccQdwvlWgT58+REdHExERQXp6Og8++CAAx44d44svvqBx48aV+jcTERHxmtC6kA3hJhf37XWoPcXpHN8WqyIUHASa8fXLnSS8CopRJuOPVUm2iYmJpKamkpycXCA4OHDgACtWrCAyMpLbbrvNY9qtW7d6fJLfqlWrSt1oHjp0CIAXXniBK6+8ktTUVDp16sT27dsZOXIkCxcupG7dusXOvDN37lzuvfdeoqKiWLVqFZdddlmFy+K2ZMkSlixZUuH0QUFBvPDCC9x444385S9/Yc2aNSUen5SUhLWWRYsW8de//jUvj1//+tf069ePUaNGERkZWSCNOzhIT08vEByEh4fz29/+FmMMsbGxpKenY63Nm/o1NzeXPn36FJllSkRExCdcC6HVyxcc1HKc4kxu4LUcqFuRBJy4uDjatGnD+vXr2bFjR972WbNmkZOTQ0JCQrH9+4cOHYq1tsircAtEeeXmOjsV1q5dm2XLltG1a1fq1q1L165dWbp0KXXr1uXNN99k//79RdJOmzaNP/7xj3Tr1o3169d7JTAAZ314ulZrLU8//XSZ8vjd735H3759SUtLY8WKFSUe27BhQxYuXMju3buZPn06w4cP54orrmDbtm1MmjSJTp065bWwuHkad7B27Vp69+6dN5i8T58+ZGRk5AV17mOvv/76slWEiIhIVWvg/NvdgPODDOo6TnAqAFsOFBxIwMk/2Dg5ORkAay0zZ86s9EDkinLPwtO9e3eaNm1aYF+zZs3o1q0bDoeDzz//vEjajz76CGst8fHxBWbz8Rf//Oc/CQoK4tFHH80LgkrSqlUr/vSnP5GUlMSWLVv44Ycf6NevH5mZmXkDyt1at25N69at+fbbb9m/fz9fffUVBw8eJC4uLu+Y2NhY4Pw4A403EBERv9PncYi6nkY1z09kEpybE5AtB+pWFGgq0FXHvchUeLjPOhh53bBhwxg3bhxz5szhueeeY926dezatYu4uDiioqKqvTzt27cHoEGDBh73u2/6T506VWTfjBkzmDx5MhMmTCA3N5dnn322yspZEVdccQVDhw5l1qxZzJw5s8yzLrm1aNGC+fPn07Bhw7zBx40bN87bHxcXx4wZM0hPT89bT8I9EBmgY8eONG3alPT0dO655x6+/vprLrvsMp/8O4uIiHh0SUe4eyFNXhkAOGfYq+nIDcgxB2o5kIAUGRlJ//79OXz4MIsXL85rQShuIHJVcz/F/vrrrz3ud29v1apVkX0NGjRg9erV9O7dm4kTJ/Loo49WWTkrauLEiYSFhTFu3DhOnDhR7vShoaHFrkeQv2tReno6DRs2LDL+IzY2lnXr1vH+++8XSCMiIuJPImqdXwWtZo6D0wHYcqDgQAKWu4vKlClTWLRoEU2aNGHgwIE+KUtMTAw9e/Zkx44deYGKW3JyMjt27KBNmzZcffXVHtOHh4ezatUq4uPjefHFF
3n44Yero9hldumllzJ27Fh+/vlnpk6dWmT/iRMnePbZZzl48KDH9FOnTiU7O5tOnToVaDUA8roQrVmzhg8//JDrrruOoKCCX019+vQhKyuLF198EVBwICIi/imiToO8n4NzLSdyAq/pQN2KPDDGPAbcDrQHzgAbgcestV/5tGBSQN++fWndujWffvopAA899JDXVsvdsGED8+bNAyA7OxuAnTt3Fhi4XHjmoRkzZtCrVy9GjBjBu+++S3R0NNu3b2fFihWEhYWRkpJS4qJqYWFhLF++nEGDBvHyyy9z+vRppk+f7jcz8jz66KO88cYbfP/990X2nTt3jnHjxjFhwgS6du1K586dadiwIZmZmaxfv55t27ZRp04dpk+fXiRtZGQk0dHRea0r+ccbuLm7GW3btq3YY0RERHwtOKxu3s8hOZCdW3RNJn+nlgPPYoHXgGuAOCAHWGOMaeTLQklBxhjuu+++vN8LD3atjN27dzN79mxmz57NwoULAed0pe5ts2fPLpKmffv2bN68mfvuu4+tW7cybdo0Nm3axF133cXnn39Or169Sj1vrVq1WLRoEQMHDuSNN95g2LBhOByOUtNVh7p16zJhwgSP++rVq8fKlSsZPXo0ubm5LF68mBdeeCFvheeHH36Yr776it69e3tMn78lIP94A7e2bdvSvHlzADp16kSzZs0qezkiIiJeZ/IHB+fgVO5ZH5amYoy1gdcXqroZY+oCx4AB1tplFcxjU5cuXbps2rSpxOPcU3N27NixIqfx6EIckFyVVF/lo/oqH3+qr6r4vvE294J97lmrpGSqr/JRfZWP6qt09uNpfDP8fCv5hNGPMv/BYdVejquuuorNmzdvttZeVd60AdlyYIy5wxjzL2PMOmPMcWOMNcbMLSVNC2PMTGPMT8aYM8aYPcaYqcaYsswdGY6zro545QJERERE5IJjQsLIqXH+wfu5s4HXrShQxxw8CcQA2cCPQIeSDjbGtAH+A1wCLAG+AboCDwM3GmN6WmszSshiGrAF2FDpkouIiIjIhalmLXJqQk3XskCOs0WnMPd3gRocPIIzKPgeuA5YW8rxr+EMDEZba//l3miM+V9XXpOA+z0ldB3TC+hlrS19BSgRL5s6dSpHjx4t9bjY2Fg19YqIiPhScG1ya+KczgZwqOWgelhr84KB0mZyMcb8CugL7AFeLbT7aSARuMcYM9Zae6JQ2peAwUAfa+3uypdcpPymTp3K3r17y3SsggMREREfqlmL3HwTE5octRz4I/ech/9nrS0w7Yu1NssYsx5n8NAdSHPvM8ZMwxkYxFprvynryYwxxY047pCVlZU3mKc4YWFhhIWF5Q1a9IbcXGeDhzfzvJD5W325p+8sC1+U2d/qy9/5U33l5uZy8uTJUr+XfMldT/5cRn+i+iof1Vf5qL5K1zDzOxw1LeB8eG3P+eY7tjJ/YwJyQHI5tXe9f1fM/p2u93buDcaYV4FhwF3AEWNMU9errqcMREREREQcQaHYfC0HQblnfFeYCroYWg7qu96PFbPfvb1Bvm0Put7TCh7KBGB8SScrbsooY8ym8PDwLqV1+3BPLejNaQ79aerEQKD6Kh/VV/n4U33VqFGD8PBwunbt6uuiFEtTJ5aP6qt8VF/lo/oqg/3hrK8xLe/XGjlnfVJflfkbczEEB6VxD1rIm3fKWusfS9KKiIiISOCoWRtqnp/KtEbOGay1pY6R9ScXQ7cid8tA/WL21yt0nIiIiIhI+QXXwuRb5yDUcYYzOY4SEvifi6Hl4FvXe7ti9rd1vRc3JqHCjDHhOBdQAwh2OALrwyEiIiIi5VCzNibfmIOQ3LOcOJNDreAaxafxMxdDy4F72tO+xpgC1+u6ee8JnAI2VsG5xwL7Xa8rDh48WAWnEBERERG/EFyLoAItB+c4eTawlsm64IMDa+0u4P+AVsDIQrsnAHWAOYXXOPCSKUBz12tbZGRkFZxCRERERPxCzdoFgoPg3HNkn8nxYYHKLyC7FRljBgADXL82db33MMakuH4+bK39S74kDwL/AV42xsQDO4BuQB+c3YmeqIpyWmuzgCxXmc8FBV3wsZiIiIjIxatmKDXytxzknuPkWQUH1aEzMLTQtl+5XgB7gbzgwFq7yxjzW+AZ4EbgZuAA8DIwwVqbWdUFFhEREZELnDHUqHn+YXCo4yzZZ9StqMpZa8dba00Jr1Ye0uyz1g6z1jaz1oZYay+31j6swEB8Zfz48Rhj/GKlSWNMQMxb3apVK1q1auXrYoiIiBSrZs3z05aG5uZwMsC6FQVkcBAojDHhxphLjTGXotmKys0YU2Be4CFDhmCM4fXXXy817Q033IAxhsWLFwOQkpKSl19xr/zS09MZO3Ys8fHxNGrUCGMMvXr1KlO5ly5dyk033URERAShoaG0bNmS/v37s3FjVYx5L8hay6pVqxg1ahSdO3emYcOG1KpVi/bt2zNmzBg0KF5ERKRqBQef75gTkpPL6ZzAajkI1G5FgWIs8LT7F92YVU5iYiJvv/02SUlJPPDAA8Uet2fPHtLS0mjWrBm33nprgX0xMTEMGDCg1HMlJSXx3nvvUatWLaKiojhy5EipaRwOB/fffz9JSUm0bNmS22+/ncaNG3Pw4EE2btzIpk2b6N69e6n5VMaZM2e46aabCAkJ4dprr+X6668nNzeX9PR0pk2bxvz581m3bh1t27YtPTMREREpt5DgmpzCGRCE5uaSG2DPhhUcVK0pwBuun1dFRkZe4cvCBLrY2FjatWvHF198webNm+nSpYvH42bMmIG1lmHDhlGzZsGPeOfOnRk/fnyp5xozZgzPP/88HTp0YN++fbRu3brUNFOmTCEpKYl77rmH5ORkQkJCCuw/d+5cqXlUVo0aNZg4cSIPPvggDRs2zNvucDh48MEH+fe//82f//xnli1bVuVlERERuRiFhoRwilMABOfm4rC2lBT+Rd2KqpC1Nsta+5O19idAsxV5wYgRIwDnk31PcnNzmTVrFsYYhg8fXuHzdOvWjejoaGrUKNuiJcePH+eZZ56hRYsWJCUlFQkMAIKDg8uU1w8//EB0dDQhISHMnTu3XOUODg7miSeeKBAYAAQFBTFu3DiAco1xyMnJ4bXXXqN79+7Uq1ePsLAwrrzySl555RXyd5P75JNPMMZw++23F5tXx44dCQ0NJTOz4DCf999/n5tvvpkmTZoQGhpKmzZt+Otf/8rRo0fLXE4RERF/ERJ6/u99cK7F4VBwIFJlhg4dSkhICPPmzePkyZNF9q9cuZL9+/dz/fXXl+lpv7csXbqU7OxsBg8ejMPh4J133mHy5Mm8+uqrbN26tcz5bN26lR49erBv3z5WrFjB3Xff7bUyugOWwq0pxTl37hy33norI0eO5OjRowwZMoTExEQcDgejRo1i6NDzE4Z169aN9u3bs3z5cjIyMork9emnn/LNN9/Qr18/GjVqlLf9mWee4cYbb+STTz7hlltuYfTo0URFRfHPf/6Tnj17cvz48UpetYiISPWqGRya93MNh+Wso+p7DniTuhVJQImIiGDA
gAGkpqaSmppKQkJCgf3uFoXExESP6bds2eKxW9GAAQPo3Llzhcv12WefAc4n9x07dmTv3r0F9g8aNIg5c+YQFhZWbB5r1qxh0KBB1KlTh3Xr1hETE1Ph8ngyY8YMAG688cYyHT9p0iTef/99HnroIaZOnZrXipKbm0tiYiIzZ87kjjvuIC4uDnAGbo8//jhvv/02Dz30UIG8Zs+enXeM29q1a3n66afp0aMHK1asoEGDBnn7UlJSGDZsGE8//TQvvfRSha9ZRESkupka53sPGOBM7mnfFaYCFBxUIWNMOBDu+tUrsxVdMTtwhi1sG7qtSvJNTEwkNTWV5OTkAsHBgQMHWLFiBZGRkdx2220e027dutXjk/xWrVpVKjg4dOgQAC+88AJXXnklqampdOrUie3btzNy5EgWLlxI3bp1SUlJ8Zh+7ty53HvvvURFRbFq1Souu+yyCpfFk88++4wJEyYQHh7OxIkTSz3e4XDwyiuv0LRpU1566aUC3atq1KjBlClTmDVrFm+99VZecHDPPffw5JNPMnv27ALBwdmzZ5k/fz6XXHIJN910U972l19+GXAGdPkDA4CEhASmTZvGW2+9peBAREQCS818XYstnFVwIPlotqIqEBcXR5s2bVi/fj07duygY8eOAMyaNYucnBwSEhKK7d8/dOjQYm/QKyM31zkrQe3atVm2bBlNmzoX7u7atStLly6lXbt2vPnmm0yaNInmzZsXSDtt2jSWLFlCz549Wbp0aZHxApX13Xff0a9fP86dO8f8+fNp06ZNmdJkZGTQtm3bYoOJ2rVrs2PHjrzfW7RoQXx8PKtXr2b79u106tQJgGXLlpGZmckjjzxSoEvThg0bCA4OZsGCBSxYsKBI/mfPnuWXX34hIyODxo0bl/eyRUREfKNQy8HpHAUHcp5mK6oC7sHGjz32GMnJyUyZMgVrLTNnzqz0QOSKct/Qd+/ePS8wcGvWrBndunUjLS2Nzz//vEhw8NFHH2GtJT4+3uuBwc6dO+nTpw+ZmZnMnz+f/v37lymde9zAzp07mTBhQrHHZWdnF/g9ISGB1atXM3v2bJ5//nnAc5ci9zlycnJKzN99DgUHIiISMGoWnJTkrOOUjwpSMQoOqpC1NgvIAjDGeGW2oop01cnKygIgPDy8lCMDx7Bhwxg3bhxz5szhueeeY926dezatYu4uDiioqKqvTzt27cHKNI9xs1903/qVNEviBkzZjB58mQmTJhAbm4uzz77rFfKtGPHDuLj48nIyGDBggXFdrXypH79+gAMHDiQd999t8Rj3Z8v9/H16tVj7ty5/OMf/yAzM5OVK1cSExNTZAxF/fr1cTgcRWYvEhERCWj5Ww4snHEEVsuBZiuSgBQZGUn//v05fPgwixcvJjk5GSh+IHJVi4+PB+Drr7/2uN+9vVWrVkX2NWjQgNWrV9O7d28mTpzIo48+WunybNu2jdjYWDIzM1m4cGG5AgOADh060KBBAzZu3Fiu9Rlq167NnXfeyU8//cSaNWt46623yMnJKdJqAM5WliNHjhRbZyIiIoHIBBV89h5oA5IVHEjAcq95MGXKFBYtWkSTJk0YOHCgT8oSExNDz5492bFjR16g4pacnMyOHTto06YNV199tcf04eHhrFq1ivj4eF588UUefvjhCpdly5Yt9OnTh6ysLJYsWVJkleiyqFmzJqNGjeLAgQOMHj3aY4vHgQMH2L59e5Ht7kHic+bMYc6cOdSsWZM//OEPRY575JFHAOe/408//VRk/4kTJ9i4cWO5yy4iIuJTQSbvR2PhnOOMDwtTfupWJAGrb9++tG7dmk8//RSAhx56yOPiYxWxYcMG5s2bB5zvV79z584CsyMVHtg8Y8YMevXqxYgRI3j33XeJjo5m+/btrFixgrCwMFJSUkpcVC0sLIzly5czaNAgXn75ZU6fPs306dMxxhSbprAjR44QHx9PZmYm8fHxbNiwgQ0bNhQ5bsyYMcV2gXJ76qmn2Lp1K9OnT2fZsmXExcXRvHlzDh06xM6dO1m/fj2TJk1i5MiRBdL17NmTqKgoFixYwLlz5+jXrx+XXHJJkfzj4+OZPHkyjz32GG3btuXmm2+mdevWZGdns3fvXj788EN69erFqlWrynz9IiIiPlfo77bGHIhUE2MM9913H08++SRwviXBG3bv3p03kNbt0KFDBbYVDg7at2/P5s2bmTBhAitXrmTNmjU0atSIu+66i6eeeipvVqWS1KpVi0WLFjF48GDeeOMNzpw5w8yZMynreJVjx47l9eFPS0sjLS3N43EJCQmlBgfBwcEsXryYuXPnkpKSwvLly8nOziYiIoLWrVvz7LPPemwRAOfg46eeeirv5+L87W9/o2fPnrz88st8/PHHLFmyhPr169O8eXMSExMZMmRIGa5aRETEj5jzf7MNcDbAxhwYawNrSedAUmidg1WdO3e+4osvvigxjXtqyLLcSJbVhTgguSqpvspH9VU+/lRfVfF9420ffPABALGxsT4tR6BQfZWP6qt8VF9lk/W/I/jxjY8B2NTGsHb4cGYM/HO1luGqq65i8+bNm621V5U3rcYcVK2xwH7X6wqtcyAiIiJygcvXrcgADlv5RXCrk4KDqjUFaO56bYuMjPRxcURERESkSgUVHHMQaMGBxhxUoapY50AuPlOnTuXo0aOlHhcbG6umXhEREZ/Ld79nFRyIiJdNnTqVvXv3lulYBQciIiK+ZYp0K8r1XWEqQMGBiJ/bs2ePr4sgIiIiZZV/tiILjgCb/Ef9XEREREREvCXAxxwoOBARERER8ZrADg7UragKFVrnINjhCKwPh4iIiIiUU76WAxOAA5LVclC1tM6BiIiIyEWl0IBkFBzIeVrnQERERORiYgreXgday4G6FVUhrXMgIiIicpExhcccaLYiEREREZGLkgkqOJWpJbDWOVBwICIiIiLiNYUXQQusbkUKDkREREREvEXdikSkIsaPH48xhg8++MDXRcEYQ2xsrK+LUapWrVrRqlUrXxdDRESkePkHJFuwmq1IxDuMMZh80feQIUMwxvD666+XmvaGG27AGMPixYsBSElJycuvuFd+6enpjB07lvj4eBo1aoQxhl69epWp3EuXLuWmm24iIiKC0NBQWrZsSf/+/dm4cWPZL76CrLWsWrWKUaNG0blzZxo2bEitWrVo3749Y8aM4WKeTnfPnj0YY0oNLjx9HvJvv/zyyzl9+rTHtK1atcIYQ05OToHtubm5JCUlcd1113HZZZfRqFEjLrnkEn7zm98wfPhwli5dmnfslClTMMbw97//3eM5EhMTS7yO9PR0jDHEx8eXeJ0iIlJF8q9zgMUGWLcizVYkASMxMZG3336bpKQkHnjggWKP27NnD2lpaTRr1oxbb721wL6YmBgGDBhQ6rmSkpJ47733qFWrFlFRURw5cqTUNA6Hg/vvv5+kpCRatmzJ7bffTuPGjTl48CAbN25k06ZNdO/evdR8KuPMmTPcdNNNhISEcO2113L99deTm5tLeno606ZNY/78+axbt462bdtWaTkuZD/
88ANTp04t9ua9sNzcXG699VZWrVpFgwYN6Nu3L82bNyc7O5tdu3Yxb948vvnmG/r37w+Qd1OflpbmMT/3zf/evXvZtWsXbdq0KbI/fz4iIlLdNJWpSLWIjY2lXbt2fPHFF2zevJkuXbp4PG7GjBlYaxk2bBg1axb8iHfu3Jnx48eXeq4xY8bw/PPP06FDB/bt20fr1q1LTTNlyhSSkpK45557SE5OJiQkpMD+c+fOlZpHZdWoUYOJEyfy4IMP0rBhw7ztDoeDBx98kH//+9/8+c9/ZtmyZVVelgtRw4YNMcbw3HPPMXz4cJo0aVJqmrfffptVq1YRExPDhx9+iHtK4/Bw5+LpJ0+e5JNPPsk7PiYmhsaNG7N582aOHj1KgwYN8vbt27ePXbt28fvf/54FCxaQnp6u4EBExM+EdWzJtuHHmdikIRaIQGMOxMUYE26MudQYcykQ7HAEVuToj0aMGAE4n+x7kpuby6xZszDGMHz48Aqfp1u3bkRHR1OjRo0yHX/8+HGeeeYZWrRoQVJSUpHAACA4OLhMef3www9ER0cTEhLC3Llzy1Xu4OBgnnjiiQKBAUBQUBDjxo0DKNcYh5ycHF577TW6d+9OvXr1CAsL48orr+SVV14h/+f5k08+wRjD7bffXmxeHTt2JDQ0lMzMzALb33//fW6++WaaNGlCaGgobdq04a9//StHjx4tczmrS1hYGE899RTHjx9nwoQJZUrzn//8B4CEhATq16/vMc8+ffrk/W6MoU+fPjgcDj788MMCx7pbE0aPHk2TJk3yAgG37OxsPvvsM+rVq8dvf/vbcl2biIh4hwkKogYWawwYE3DdihQcVK2xwH7X64qLub+3twwdOpSQkBDmzZvHyZMni+xfuXIl+/fv5/rrry/T035vWbp0KdnZ2QwePBiHw8E777zD5MmTefXVV9m6dWuZ89m6dSs9evRg3759rFixgrvvvttrZXQHLIVbU4pz7tw5br31VkaOHMnRo0cZMmQIiYmJOBwORo0axdChQ/OO7datG+3bt2f58uVkZGQUyevTTz/lm2++oV+/fjRq1Chv+zPPPMONN97IJ598wi233MLo0aOJiorin//8Jz179uT48eOVvGrvGzlyJG3atOHf//433333XanHN27cGKBMx7rFxcUBRbsWpaWlUadOHbp168Z1111XJDj48MMPycnJITY2tsyBrYiIeJkxBW6w1a1I8psCvOH6eVVkZOQVvizMhSAiIoIBAwaQmppKamoqCQkJBfa7WxQSExM9pt+yZYvHbkUDBgygc+fOFS7XZ599Bjif3Hfs2JG9e/cW2D9o0CDmzJlDWFhYsXmsWbOGQYMGUadOHdatW0dMTEyFy+PJjBkzALjxxhvLdPykSZN4//33eeihh5g6dWrezWZubi6JiYnMnDmTO+64I+9GdujQoTz++OO8/fbbPPTQQwXymj17dt4xbmvXruXpp5+mR48erFixokD3mZSUFIYNG8bTTz/NSy+9VOFr9uTo0aNl6lpWnODgYCZPnszvf/97/v73v/Puu++WePztt9/O888/z/Tp08nKyuLGG2+kc+fOREdHF5vG3SWo8M3/2rVr6dWrF8HBwfTp04eFCxfy1Vdf8etf/7rA8epSJCLiQyaoQHAQaLMVKTioQtbaLCALwBhzLiio8g01Ozp0rHQe1aXjNzuqJN/ExERSU1NJTk4uEBwcOHCAFStWEBkZyW233eYx7datWz0+yW/VqlWlgoNDhw4B8MILL3DllVeSmppKp06d2L59OyNHjmThwoXUrVuXlJQUj+nnzp3LvffeS1RUFKtWreKyyy6rcFk8+eyzz5gwYQLh4eFMnDix1OMdDgevvPIKTZs25aWXXirwFLpGjRpMmTKFWbNm8dZbb+UFB/fccw9PPvkks2fPLhAcnD17lvnz53PJJZdw00035W1/+eWXAWdAlz8wAGcXnGnTpvHWW295PTg4duxYmbsEFeeOO+6gR48eLFq0iI8//rjEmayuvPJK5s6dy8MPP8zcuXPzuoo1atSIa6+9lnvvvZd+/foVSNOuXTtatGjB119/zcGDB4mMjOTbb79l//79jB49GiCvK1J6erqCAxERf3JJJ441vQZyvwUUHIhUubi4ONq0acP69evZsWMHHTs6A6ZZs2aRk5NDQkJCsf37hw4dWuwNemXk5jqXRq9duzbLli2jadOmAHTt2pWlS5fSrl073nzzTSZNmkTz5s0LpJ02bRpLliyhZ8+eLF26tMh4gcr67rvv6NevH+fOnWP+/PlFBrAWlyYjI4O2bdsWG0zUrl2bHTvOB4AtWrQgPj6e1atXs337djp16gTAsmXLyMzM5JFHHinQpWnDhg0EBwezYMECFixYUCT/s2fP8ssvv5CRkZHXNccbLr/8cvbs2VPsfk/TmHoyZcoUrrnmGsaOHcvGjRtLTHfnnXcycOBA1q5dS1paGl9++SUbN25k8eLFLF68mD/+8Y950+26xcXFMWfOHNauXcvgwYPzbvzdQUGnTp2IjIwkPT2d0aNHk5GRwdatW2natGmJrRIiIlLFLutOxq8Gwc5/AM5pxgOJggMJOO7Bxo899hjJyclMmTIFay0zZ86s9EDkinLf0Hfv3j0vMHBr1qwZ3bp1Iy0tjc8//7xIcPDRRx9hrSU+Pt7rgcHOnTvp06cPmZmZzJ8/P2+6zNK4xw3s3LmzxKfs2dnZBX5PSEhg9erVzJ49m+effx7w3KXIfY6cnJxSn+JnZ2d7NTjwlh49enDHHXfwzjvvkJqayv/8z/+UeHxwcDB9+/alR48egHMg8sKFC7n33nuZM2cOAwcOLDDNbnx8PHPmzCEtLY3BgweTlpZG/fr1C8zSdd111/H++++Tm5vL2rVr8z5HIiLiWy0a1c37uUOzuiUc6X8UHASYinTVycrKAs5PnXghGDZsGOPGjWPOnDk899xzrFu3jl27dhEXF0dUVFS1l6d9+/YARbrHuLlv+k+dOlVk34wZM5g8eTITJkwgNzeXZ5991itl2rFjB/Hx8WRkZLBgwYJiu1p54p5VZ+DAgaX2qXd/vtzH16tXj7lz5/KPf/yDzMxMVq5cSUxMTJExFPXr18fhcBSZvSiQTJ48mSVLlvDYY48xcODAcqWtUaMGd955J9u2bWPixImkp6cXCQ7A2VXIWssHH3zAtddeW6CLV58+fUhNTWXz5s3qUiQi4kfCa53vwVCvdmBNEKHZiiQgRUZG0r9/fw4fPszixYtJTk4Gih+IXNXcN2Rff/21x/3u7Z5WtW3QoAGrV6+md+/eTJw4kUcffbTS5dm2bRuxsbFkZmaycOHCcgUGAB06dKBBgwZs3LixXOsz1K5dmzvvvJOffvqJNWvW8NZbb5GTk1Ok1QCcrSxHjhwpts4CQZs2bXjwwQf573//y7/+9a8K5eEO2gs3Ozdv3px27dqxe/dulixZQkZGRt74Drf84w4UHIiI+I8a5nxAEGjdihQcSMByr3kwZcoUFi1aRJMmTcr99NZbYmJi6NmzJzt27MgLVNySk5PZsWMHbd
q04eqrr/aYPjw8nFWrVhEfH8+LL77Iww8/XOGybNmyhT59+pCVlcWSJUuKrBJdFjVr1mTUqFEcOHCA0aNHe2zxOHDgANu3by+y3T1IfM6cOcyZM4eaNWvyhz/8ochxjzzyCOD8d/zpp5+K7D9x4gQbN24sd9mr27hx42jQoAGTJk0q0s0KnIugrV69Gk/rnPz88895M2xde+21Rfa7gwH3GhX510MAZ4vVpZdeyltvvcW3335LVFSU1wezi4hI+eWfr0hTmYpUk759+9K6dWs+/fRTAB566CGPi49VxIYNG5g3bx5wvl/9zp07C8yOVHhg84wZM+jVqxcjRozg3XffJTo6mu3bt7NixQrCwsJISUkpce75sLAwli9fzqBBg3j55Zc5ffo006dPL/MAWYAjR44QHx9PZmYm8fHxbNiwgQ0bNhQ5bsyYMcV2gXJ76qmn2Lp1K9OnT2fZsmXExcXRvHlzDh06xM6dO1m/fj2TJk1i5MiRBdL17NmTqKgoFixYwLlz5+jXrx+XXHJJkfzj4+OZPHkyjz32GG3btuXmm2+mdevWZGdns3fvXj788EN69erFqlWrynz9vtCoUSMef/zxYlt8PvnkE6ZNm0bTpk3p1atX3piT/fv3895773Hq1Cluu+027rjjjiJp4+PjmT59Otu2baNx48b85je/KXJMbGxs3mdVrQYiIv4h/99uBQci1cQYw3333ceTTz4JnG9J8Ibdu3fnDaR1O3ToUIFthYOD9u3bs3nzZiZMmMDKlStZs2YNjRo14q677uKpp57Km1WpJLVq1WLRokUMHjyYN954gzNnzjBz5kzKOg3usWPH8vrwp6WlFVlEyy0hIaHU4CA4OJjFixczd+5cUlJSWL58OdnZ2URERNC6dWueffZZjy0C4Bx8/NRTT+X9XJy//e1v9OzZk5dffpmPP/6YJUuWUL9+fZo3b05iYiJDhgwpw1X73ujRo3nttdc8zoI0duxY2rZty5o1a/jyyy95//33OX36NI0bNyY2NpYhQ4YwZMgQj0Fgnz59MMZgrSU2NrbYYxQciIj4l/zdihwBNpWpCbR+UIHKGLOpS5cuXTZt2lTice6pIctyI1lWF+KA5Kqk+iof1Vf5+FN9VcX3jbd98MEHgLOFREqn+iof1Vf5qL7K7qMfP2JkmrNlvWfznky/fnq1nv+qq65i8+bNm621V5U3rcYciIiIiIh4kQYki4iIiIgIoDEHUgxjTDjg7jsQ7Gm2EpHSTJ06laNHj5Z6XGxs7EXV1LtlyxYWL15cpmPHjx9fpWURERHJL8hotiLxbCzwtPuXgwcP+rAoEqimTp3K3r17y3TsxRYclLa6spuCAxERqU5RDaJIaJJAEEH0junt6+KUi4KDqjUFeMP186rIyMgrfFkYCUyeZsAR54xL+aeWFRER8RdNajfhqjrOscC/bfpbH5emfBQcVCFrbRaQBWCMOVfW6ShFRERERHxBd6siIiIiIgIoOBARuSgF2tR6IiJSPRQc+Bn31Fea2UhEqpI7OPC06rKIiFy8FBz4mdDQUABOnDjh45KIyIXM/R3j/s4REREBBQd+JzzcuSzCzz//TFZWFg6HQ83/IuIV1locDgdZWVn8/PPPwPnvHBEREdBsRX6nUaNGnDhxgpMnT/Ljjz96Jc/c3FwAatSoUcqRAqqv8lJ9lY8/1VdYWBiNGjXydTFERMSPqOXAzwQFBdGyZUsiIiKoVauWV/oDnzx5kpMnT3qhdBcH1Vf5qL7Kx9f1ZYyhVq1aRERE0LJlSzTFsoiI5KeWAz8UFBREkyZNaNKkiVfy++CDDwDo2rWrV/K70Km+ykf1VT6qLxER8Wd6ZCQiIiIiIoCCAxERERERcVFwICIiIiIigIIDERERERFxUXAgIiIiIiKAggMREREREXFRcCAiIiIiIgAYa62vy3BRMMZk1K5du1HHjh2r/dxZWVkAhIeHV/u5A5Hqq3xUX+Wj+iof1Vf5qL7KR/VVPqqv8vFlfe3YsYNTp05lWmsblzetgoNqYoz5L1AP2OOD03dwvX/jg3MHItVX+ai+ykf1VT6qr/JRfZWP6qt8VF/l48v6agUct9a2Lm9CBQcXAWPMJgBr7VW+LksgUH2Vj+qrfFRf5aP6Kh/VV/movspH9VU+gVpfGnMgIiIiIiKAggMREREREXFRcCAiIiIiIoCCAxERERERcVFwICIiIiIigGYrEhERERERF7UciIiIiIgIoOBARERERERcFByIiIiIiAig4EBERERERFwUHIiIiIiICKDgQEREREREXBQciIiIiIgIoOAgYBljWhhjZhpjfjLGnDHG7DHGTDXGNCxj+sbGmOHGmEXGmO+NMaeMMceMMR8bY+4zxlxQn43K1pcrj+eNMWnGmH2u+so0xnxhjHnaGNO4Kstf3bxRXx7yvMcYY12v4d4sr6956fO1J1/9FH79XJXlr27e/HwZY3obYxYaYw648jpgjPk/Y8zNVVF2X/DC931CCZ8t9yu3qq+junjr82WMucX1WfrR9Z2/2xizwBjTo6rK7gte+v4yxph7jTEbjTFZxpiTrr+Po40xNaqy/NXJGHOHMeZfxph1xpjjrv87cyuYl9f/znqLFkELQMaYNsB/gEuAJcA3QFegD/At0NNam1FKHvcDrwMHgLXAD0AkcDtQH1gI/N5eAB8Qb9SXK5+zwGZgO3AIqAN0B34L/AR0t9buq4prqE7eqq9CebYEtgE1gLrACGttsjfL7Ste/HztARoAUz3szrbW/tM7JfYtb36+jDFPAs8Ch4HlOL/PmgBXAmuttY96/QKqmZe+7zsDA4rZ3RuIA96z1t7qnVL7jhf/Pz4PPApkAItxfsaigP5ATeCP1toK3RT6Ey/W1xzgHpx/G5cBJ4DrgU5cWPcTW4AYIBv4EegAvGWtvbuc+Xj976xXWWv1CrAX8D5ggVGFtv+va/v0MuQRB/QDggptb4ozULDAIF9fq7/Ul+v4WsVsn+TK5zVfX6s/1Ve+dAZYA+wCXnTlMdzX1+lv9QXsAfb4+noCqL5+7zp+NRDuYX+wr6/Vn+qrhPw3uPLp7+tr9Zf6cv0dzAV+Bi4ptK+PK5/dvr5WP6qvAe46AZrk2x4MLHLtS/D1tXqpvvoAbV1/12Jd1zbXF/Vepdfp64rWq5z/YPAr1wfnvxS9sQ/HGc2eAOpU4hyPu87xL19fb4DUV4z7JsXX1+uP9QU8DDiAa4HxXEDBgTfri4sgOPBWfeHsErvbdWyEr6/L3+urhPx/7cr/R6CGr6/XX+oL6ObKZ0kx+48DWb6+Xj+qrzmufEaW8Bnb5OvrrYL6i6UCwUF13JdU9nVB9Su/SMS53v/PWuvIv8NamwWsB8JwdnepqHOu95xK5OEvqqO++rnev6xEHv7Cq/VljOkITAamWWs/8mZB/YS3P1+hxpi7jTGPG2MeNsb0uZD66+K9+roGaA2sAI64+ob/zVVnF1J/8Kr+/vqT632GtfZCGHPgrfraCZwFuhpjmuTfYYy5FucN3BqvlNi3vFVfTV3vuz3sc2/rYoxpUMFyX
miq476kUhQcBJ72rvfvitm/0/XeriKZG2NqAn90/bqqInn4Ga/XlzHmL8aY8caYl4wx63D2ef4S501woPNafbk+S2/i7Kb2eOWL5pe8/flqirPOJuEce5AO7DTGXFfRAvoZb9XX1a73gzjHAS3H+f9vKvAfY8yHxpiISpTTX1TZ970xpjZwN85WvQti/A9eqi9rbSbwN5zj8LYbY94wxjxnjEkF/g9nV7Y/lZRHgPDW5+uw6721h32/yvdzhzKW60JXpfdx3qDgIPDUd70fK2a/e3uDCuY/GWcz4Apr7fsVzMOfVEV9/QV4GhgD9MIZRPW11v5SgfL5G2/W1zicA0MTrLWnKlkuf+XN+poFxOMMEOoAVwD/BloBK40xMRUupf/wVn1d4nq/H6iNc+BjOM7vrvdxdmFbUOFS+o+q/L6/05Vupb0AJlJw8Vp9WWun4pygoyYwAvg7znEu+4AUa+2hyhTUT3irvpa73v9sjGnk3uh6QDQh33E+n4XHT1T1fVylKTi48BjXuy13QmNGA2Nxjpq/x5uF8mPlri9rbVNrrcF5E3c7zicjXxhjulRB+fxNmerLGNMVZ2vBFGvthiovlf8q8+fLWjvBWpturT1orT1prf3KWns/zgFqtXGO17jQlbW+3F2tDHCHtTbNWpttrf0aGIizD/11F1gXI08q/H0PJLre/+2lsgSCMteXMeZR4B0gBWiDM2C/Cmc3mbeMMS9UURn9SVnraz6wEmc9uVtapgJbgJs5/yT8Qui6Vh0q8//aKxQcBB53RFm/mP31Ch1XJsaYkcA0nNN09nE1q14IqqS+AFw3cYuAvkBjnIOyAl2l6ytfd6LvgKe8VzS/VGWfr3ymu96vrUQe/sJb9XXE9b7bWrs1/w5XK5W71bNruUvoX6rq+74TznEbP+Ict3Gh8Ep9GWNigeeBpdbaP1trd7sC9s04g8/9wFhjzK9KyCYQeKW+XP3m++NsVf8Z58PFe3F+vnrhnA4WnNOcSvX83agUBQeB51vXe3F90dq63ovry1aEMWYM8ArwFc7A4EJacMnr9VWYtXYvzqAquvDgtQDkjfqq60rfETidf6ElnN2xAJJc26ZWtsA+VuWfL87/Qa1TiTz8hbfqy53P0WL2u4OH2mUrlt+qqs/XhTYQ2c1b9eVe72Ft4R3W2pPApzjvn64sbwH9jNc+X9baHGvtFGttZ2ttbWttPWvtjTj/NnYGTgFfV7bAF4jq+LtRKTV9dWKpMPeXVV9jTFD+ke7GmHCgJ87/hBvLkpkx5m84xxlsAW6w1h4uOUXA8Wp9leBS13ug/6H1Rn2dAWYUs68Lzj+oH+P8ggz0LkfV8flyd43xNBNIoPFWfX2Ecza1tsaYEGvt2UL7f+1631P5IvuU1z9fxphaOJ/sOij+/2mg8lZ9hbreixvU7t5e+HMXaKrj++seoBYw21p7rrSDLxLVdV9Scb6aQ1Wvir8ox+IZOBch6QC08ZDPU67jPwca+fq6/Lm+XNuaesg7iPOLoK339bX6S32VkPd4LqB1Drz4+Yr29H8QuBxnf10LPO7ra/WX+nLtm+s6fmKh7TfgvPE9CjTw9fX6S33lO+YeV7plvr42f60vnIO1Lc4uMs0L7bvJ9fk6BTT29fX6Q3259tXzsO1qIBPIAn7l62utgrqLpYR1DkqpL79eBM24CiMBxMOy2ztwLtrSB2cz1DXWtey2MaYVzoU29lprW+XLYyjOgVa5wL/w3Ldtj7U2pYouo9p4qb7G4Fzd9yOcK/1m4Jzm7jqcA5J/BuKttdur45qqkjfqq4S8x+PsWjTCWntBTJ/opc/XeJyzoax17c/CObjvFpxP3VYAA23RJ+QBx1ufL2PMJTjnA48C1uHs6nE5zj7hFhhirQ34GYu8/f/RNf1yL5wrIi+r6vJXNy/9fwzCefN2Pc7/i4twfsd3xNnlyABjrLXTquWiqpAX/z9+gjNg+gpnnUXjHIx8BrjdXhizH2KMGYBzRWhwTkryO5ytuutc2w5ba//iOrYVxddXmevdJ3wdeelVsRfQEufUhwdwNm3uxTmguFGh41rh/EO5p9D28a7tJb0+8PV1+lF9/Rp4FWf3q8M4uzQcAz5z1eUF1fJS2foqIV/35+6CaTnw0ufrOuBtnDOFHcW5EOEvOOdT/yM4H+RcKC9vfb6ARjiftP3XlU8Gzj+03X19jX5aXx1d+/dxAayIXJX1hfOp7xicXTuOu77zD+GctrOvr6/RD+vrr8Am1/fXGdf/yelAK19fn5fryv03rLjXnnzHlvb/sUz17ouXWg5ERERERATQbEUiIiIiIuKi4EBERERERAAFByIiIiIi4qLgQEREREREAAUHIiIiIiLiouBAREREREQABQciIiIiIuKi4EBERERERAAFByIiIiIi4qLgQEREREREAAUHIiIiIiLiUtPXBRAREe8yxrQC/gvMttYm+LY0VcsYEwxcA7QHGgOHgd3AR9bac74sm4hIIFJwICIixTLG7AGw1rbybUkKMsY0Bv4OJAL1PBySYYyZCUyw1p4oZ94NgBFAZ+BKoB1QA7jBWrumhHQ1gFHAvUBb4BSwEZhorf1PecogIuIrxlrr6zKIiIgXuZ6mtwGOWWsPVDKvPeBfwYExphfwDtAIeBNIBb4AjuBsPYgG7gQSgANAP2vttnLk39mVH8CPQDAQSQnBgTHGuMpxB/AtsMxVvv8BagGDrLVLynGZIiI+oeBARESK5W/BgTGmB7AW2Inzhvu7Eo7tAPw/nDf2va21O8t4joZAF+ALa22mMSYFGErJwcFdwDzgP0C8tfa0a/vVwMfAMaCNtTarTBcqIuIjGpAsInKBMca0MsZY101t/u0pru2tjDF/MsZsM8acNsYcNMa8YYypn+/YWGOMBS4HLnels8Xk28GV9z5jzBlXfvOMMe09lM1dhl8ZY0YZY740xpwyxnxQhuuqBywEvgSuKSkwALDWfgPEARnAm66n+6Wy1h6x1qZZazPLcrzLA673J92BgSuvz3AGKBE4WxVERPyaggMRkYvPC67XVuBVYD/OPvaL8h2zB5iA84n3MdfP7tdi90HGmBuBzcAfgM+AaUAacDvwqTGmSzFlmAY8C2xz/by+DOX+C87xBXe5n8AbYy4zxrxjjDnuei11BSvfG2PGW2szcI4B6AbcWIZzlJsxJhTnoOiTwDoPh6x0vcdVxflFRLxJA5JFRC4+3YErrLU/ABhjagLpQB9jTFdr7afW2j3AeGNMAoC1dnzhTFzdb97GeVN8rbV2e7590cAnQDLOLjqFdQGutNb+tywFdj31vw9401q7K9/51wEtgSU4ZynqhbMbT97DL2vtJ8aYTcDvOX+j7k1ROAcs77bW5njY7+7O1K4Kzi0i4lVqORARufg84w4MAFw3tLNcv3YtRz5/BBoAT+cPDFx5fg0kAVcaYzp5SPtCWQMDlyuAS3H263d7BLgMGGGtHWitHQv0AN4HGhZKvwGIKcf5ysPdHetYMfvd2xtU0flFRLxGLQciIhefzz1s2+d6L3xTXZIervcYY8x4D/vdT8o7
AtsL7fu0HOcBaOV6/zbfthtwzkY0073BWuswxkwEhhRKfwIIL+c5vcU91kEzgIiI31NwICJy8TnqYZu7O0yNcuTT2PU+opTj6nrY9nM5zgMQ5nrP/3Q+AvjBFp12b4+H9C2BQ+U8Z1m5y1S/mP31Ch0nIuK31K1IREQqyn2zG2OtNSW8ZntIW96n6L+43pvl23YYZ7eiwgpsM8bUAX4HfFTOc5bV90Au8CvX+I3C2rreS5xdSUTEHyg4EBGRkuRSfGvCRtd772oox5eAA4jNt20N0Mw9aBryBi7/Pd/vNYB/4VyI7LWqKJi19gzO9Q3C8FwXN7ne06vi/CIi3qTgQERESpIBRBhjanvYNwtnF6WnjTFFBjIbY4KMMbHeKIS19hecwUhivs1TgZ+AmcaYd40x/8Q5JWo/nKsl3wx8DQwGhlhrf/RGWYrxuut9ojGmlnujaxG0/8HZ8rGwCs8vIuIVGnMgIiIlSQOuBlYZYz4CzgBbrbXLrLUZxpg7cK6PsNEYk4bzZtyBs2tPD5zjEmp5zrrc/gEsN8bcZ62dYa09bIzpDUzBOTgZ4EOc05kuBJq4yv9Pa+23HnMshivQaOL6tZfr/a/GmLtdPy+21i7Ol2Q+zrUd7gC+MMYsw3nt/4Oz5WWEtfZ4ecogIuILCg5ERKQkE3FOwdkP6InzRnc2sAzAWptmjPkNzgXKfoezW81ZnE/00/Hi03Jr7XvGmP8HvGaMOWytXWKt3Q0M9HB4dCVPdwfO1aHz65vv5z3kWwzOWmuNMXfh7F50LzAKOI1znMNEa+1/KlkeEZFqYYpO8iAiIuKfXN2b3sM59uAVnDfeRWYhMsZEAqMBrLVPVGcZRUQCmYIDEREJKK4Zgf4BPIxzDYENwFc4xz/Ux7nYWXcgG3jUWpvkm5KKiAQeBQciIhKQjDGXA/fh7O7TBmdgcATYCiwHZllrs3xXQhGRwKPgQEREREREAE1lKiIiIiIiLgoOREREREQEUHAgIiIiIiIuCg5ERERERARQcCAiIiIiIi4KDkREREREBFBwICIiIiIiLgoOREREREQEUHAgIiIiIiIuCg5ERERERARQcCAiIiIiIi4KDkREREREBFBwICIiIiIiLgoOREREREQEgP8PQGJ/jjK4tn0AAAAASUVORK5CYII=\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "image/png": { - "height": 261, - "width": 387 - }, - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "for key in \"IVF16k\", \"IVF16k_HNSW\", \"IVF16k_2level\", \"IVF16k_2level_HNSW\": \n", - " \n", - "\n", - " indexkey, res, keys, stats = parse_result_file(find_latest_version(\n", - " f\"../logs/{dsname}.{key}.b.log\"))\n", - " \n", - " if res.size == 0: \n", - " print(\"skip\", key)\n", - " continue\n", - " \n", - " r10 = res[:, 0]\n", - " qps = 1000 / res[:, 1]\n", - " \n", - " pyplot.semilogy(r10, qps, label=key)\n", - "\n", - "\n", - "pyplot.xlabel(\"inter @ 10\")\n", - "pyplot.ylabel(\"QPS\")\n", - "pyplot.legend()\n", - "pyplot.grid()" - ] - }, - { - "cell_type": "markdown", - "id": "140cd7a7", - "metadata": {}, - "source": [ - "It performs pretty well, even in combination with HNSW and 2-level encoding. \n", - "\n", - "# Compression \n", - "\n", - "Here we fix the coarse quantizer and evaluate compression options" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "f01c7309", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "IVF16k\n", - "IVF16k,PQ48\n", - "IVF16k,PQ64\n", - "IVF16k,RR192_PQ32\n", - "IVF16k,SQ4\n", - "IVF16k,SQ4_PCAR100\n", - "IVF16k,SQ6\n", - "IVF16k,SQ6_nores\n", - "skip IVF16k,SQ6_nores\n", - "IVF16k,SQ8\n", - "IVF16k,SQ8_PQ32\n", - "IVF16k,SQ8_PQ32_nores\n", - "skip IVF16k,SQ8_PQ32_nores\n", - "IVF16k,SQ8_nores\n", - "skip IVF16k,SQ8_nores\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABTUAAANwCAYAAAD+zI5wAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAABYlAAAWJQFJUiTwAAEAAElEQVR4nOzdd3zrV33/8dfRsqxhy1Pe4+59c1dyb+bNgpAQIGwoqxRIwgotlFLg14RSIJRVQhJICmW0DaMtKySE7H1zk7uTu+x7va9tecqWbGt/f398ZVnyuravbdm+n+fjoYek79KR75Wv79ufcz5K0zSEEEIIIYQQQgghhBBisTCkewBCCCGEEEIIIYQQQggxHRJqCiGEEEIIIYQQQgghFhUJNYUQQgghhBBCCCGEEIuKhJpCCCGEEEIIIYQQQohFRUJNIYQQQgghhBBCCCHEoiKhphBCCCGEEEIIIYQQYlGRUFMIIYQQQgghhBBCCLGoSKgphBBCCCGEEEIIIYRYVCTUFEIIIYQQQgghhBBCLCoSagohhBBCCCGEEEIIIRYVCTWFEEIIIYQQQgghhBCLioSaQgghhBBCCCGEEEKIRUVCTSGEEEIIIYQQQgghxKIioaYQQgghhJg3SqkGpZSmlNqd7rEIIYQQQojFS0JNIYQQQog0UEp9SCl1h1LqgnSPZZhS6i3xMe2e5BillLpcKfUtpdQepVSPUiqslOpQSj0Wf1/yM+Y8UEplKKVer5T6slLqD0qp1nhgrCmlrpviNZRS6mPxP0uvUsqnlDqolPp7pZRlkvO0pNsfp/A6v08+ZzrvUwghhBBiPKZ0D0AIIYQQ4jz1IeAKoAE4lM6BJHkL8MH446cnOOaLwL8kPY8CfqAAuCZ++7BS6o2apvWPc/5pIAAMzsJ4z3drgUdmerJSygz8Hrg+vimE/ud5Qfz2DqXUVZqm+c9yqeuUUgWapnVO8Dp5Sa8hhBBCCDEr5LfoQgghhBBiOsxAD/A9YBdg1TTNBeQBX0EPxS4DfjzeyZqmXa1p2hpN016en+EueV7gCeBO4O3TPPdf0MPGAHrIbgPswI3of8Y7gPvOco0m9L8T75nkmPfEj2mc5viEEEIIISYkoaYQQgghhJiO3wHVmqb9naZpL2maFgHQNK1H07Q7gK/Gj3uHUqoyXYM8TxwBcjVNu0bTtH/UNO3/pnqiUqoIuC3+9B80Tfu5pmlRTfcn4MPxfe9RSm2a5FL/Fb9//yTHfGDUsUIIIYQQ50xCTSGEEEKIeRRfc1JDn3oO8NNR6xM2jDreopT6pFLqufj6lUGlVKNS6j+UUmvHuf4D8evUKKVs4+zPUEodSV4LUSm1Oz6m4annt48aU2INRE3TDk8wrXzYz5Iebxvn9cdtFDT8dVFKPR1//h6l1ItKqX6lVKdS6nfJ71cpVayU+kH8egGl1Cml1BeUUsbxBqWUWqaU+qxS6gmlVH38HK9S6qX49sxJ3hNKqXVKqV/H1w4dUkqdUEp9RSllja9DqimlfjbJ+TfG171sV0qF4td5UCn1+sledzKapsU0TZvp+pRvAzKAPuD+ca79B6AGUMB7J7nO80A9sH2Cv4+r0Ss+m4BnZjhWIYQQQogxJNQUQgghhJhfQ4AHCMef98efD98S6xIqpYqBl4EfAJcC2UAQqAD+GjiglHrrqOt/HGgGVgLfGef1vwZsBDqAj8S3heKvHYg/Hxg1Js803l930uNxA8azUUp9E3gAPQwDyEdf7/N5pdQqpdRK9K/LJ4Fc9HXilwPfAO6a4LK/Ab4NXAVUov85ZAEXxbc/q5RyTjCea4D9wDvR1w4NAdXAPwFPoYeDE70Xs1Lqv4A/Am8C3PHXLgDeCDyilPrXSb8gc+PK+P2zmqYFJjjm0fj9VZNcR2OkAvMD4+wfDsr/K36sEEIIIcSskFBTCCGEEGIeaZr2a03TioAX45tu0zStKOm2AxJNXP4AbAaeBS4HMjVNy
wKK0ANLK/CfSqnlSdf3oq+PqAG3KKVuGN6nlLoC+Nv4049qmtYRP+fF+Jh+Hd/37VFjKprGW7wi6fFr0zhv2AXxMX4GyI6/303ASfQA8070gKwZuCC+Pwv4cvz8W5VSG8a57sH4NVegrwOaA2SiB401wPb4tVMopfKBX6F/rV8GNmqalg04gL8CNgC3TPJ+/jV+XAN6xaMzfr4TuBk91P57pdRka1LOhXXx+6OTHHMsfr9WKaUmOe4X8fu/Ukmd7+Pn/NWoY4QQQgghZoWEmkIIIYQQC9MH0SsVXwFep2nac5qmhQA0TfNomvY54IfozV3+NvlETdOeRG/kA/ATpVSBUioL+Dn6z38/1jTtj7M94Hig9ZX405c0TTs+g8tkA1/TNO37mqYNAmia9irw0fj+m9CrUK/XNO1wfP+gpmlfA55Eny49unoVTdM+Gr/m6aSvY1DTtAeBNwAR4EPjTNn/FHoTpA7g9ZqmvRY/N6xp2gPA3wCu8d5IvKL00+jNfK7WNO2Xw53ENU3za5p2f9L7+tJ0vkizoDh+3zrJMcP7HPHbuDRNOwXsAcqB3Um7rkSvKn5Z07STMx6pEEIIIcQ4JNQUQgghhFiYhqft3qNpWnCCYx6I3187zr4vAq+iT3f+d+Bu9GnXpxkVgs6ir6KvoxlhpAnNdIWA746z/QVGpsf/MF6ROtoT8fvxKjUnpGlaHXrFog29UjTZcEB6/3ivqWnab4C6CS79AfSft38ff43x/BZ9SYH18eUG5os9fj80yTGDSY8nDDXjhisxk6egf2DUPiGEEEKIWWNK9wCEEEIIIUQqpZQJuDD+9LvxNSbHM7xmZfnoHZqmBZVSf4Ve6fnm+OYo8P7hasHZFJ8+/Y/xp/+oadrLM7xUg6ZpvtEbNU2LKaW6gDImntY+vPZnzgRjvBa9q/eF6JWK4zUHKkk6PoORadrPTzLm54Fl42y/OH7/dqXUGyY53xy/LwfaJjluLszWOpe/Bv4NeJtS6uPxbW9FXzv2V7P0GkIIIYQQCRJqCiGEEEIsPLmAJenx2YzbuVvTtFeVUt9lJGz8rqZpe2ZhfCni63b+HH3q912apn37HC43WagXPcsxw/vNo3cope5Cn0o+LAz0MNKwKTd+nj3pmBxGZjZNNq6JpnAPV15OOn07iS0+1ncB3x/vgGmubzqZAfRp86On248ZT9ykQbimab1KqT+hd1W/Cf3vghO9SrV7snOFEEIIIWZCQk0hhBBCiIUneYmgzZqmHZnJRZRSdvSO3cN2KaUMmqbFzml0qa9xNfC/6IHgT9Gb8Swo8SrJT6GHnl9FbzRUp2malnTMc+gd5pMb4kzWHGcqhv8cb9M0baKu7OPJRF82YC61ooeaJZMcM7zPz1lCzbhfoIeaH2DkaydTz4UQQggxJ2RNTSGEEEKIhaebkarDdZMdeBb/BixH7xTejx7aff6cRpZEKXUp8Ef0zuC/Qe+oPlvTmWfTO+L3P9Y07SvxZkGjxzleiNgDDAfAk613OdG+4enw0/oz1DTtZ5qmqfFu07nOWQx3Nl8/yTHD4z4+xT/XPwNdwNXxWw/w0IxHKIQQQggxCQk1hRBCCCHSYzgsGxNUaZoWBvbFn47p5D0VSqkbgY/EX+f96F24Ab6ilLpgumMa5/oXogdWNuBB4H2apkUnPyttyuL3B8fbqZSqBFaM3h5v0DQc/l06yfUn2jc81f9GpdSYKfFp9lT8/jKllHWCY4YbUD0xwf4U8b+3v0Jf69UA/Hq407wQQgghxGyTUFMIIYQQIj364/euCfb/LH7/NqXUlZNdSCmVM+p5AfDj+NPvapr2jKZpP0fvtG0B/iveBGe6Yxq+/mbgESALeAx4RzzQWqj64vcbJ9j/dSYOcn8Xv/+oUip79E6l1NvQq2HH83P0oLiEkXVNxzX6z3AeDHddd6GH36PHcyOwGr2R0C+ncd17gO/Eb3ef8yiFEEIIISYgoaYQQgghRHocjd+/dbywDPgJ8BL6z2t/UkrdppRKNA1SShUqpd6jlHoauG3UuT8GCoFXgS8nbb8ZaEefcvyNScZ0nVJq3CnVSqnVwKPoTXSeBd4Sr2hcyB6L39+slPqwUsoCoJSqUEr9HHgP0DvBuT9An0btBv6slFofP9eklHo3+jqi3vFO1DTtOPoSAKBXyN6jlEp0SVdKOZRS1yql/hP4n5m8MaVUjlIqf/iWtCsrefvoSlFN09oZaUb0r0qp9yuljPFrXh9/XwC/nM6arpqmndA07XPx27GznyGEEEIIMTMSagohhBBCpMd/AiH0qctdSqkzSqkGpdTzkJjK+2bgBfQp3v8WP65HKeVDX6/xAeAK9Go6AJRSHwXeFL/2+5IDR03TuhipyvuMUuqqUWP6HXqAtwpoUUq1xcfUkHTMP6AHpqBXPtYppdonuH3unL5Cs+dn6AGxCT0sHlRK9QKN6E1tbgfGDe40TesE3ote1bgLeE0p5UVvnPPL+Hk/ih8+Xrj7eeCH8ccfB04rpfrjr9+PHhC/D33K9kwcBDqTbsN+PWr7JeOc+2XgYfTGRL8ABpRSA+jLCuQBrwC3zHBcQgghhBBzSkJNIYQQQog00DTtBPqahY+gT48uAioZWf8RTdM60EPLv0IPnzoAB/pU6RPoAd316NOnUUotB74bP/3L41XYaZr2EHB//Bo/U0q5kvZ1AVeiT03uBAriY6pMukTyz4856BWME90c0/mazJX4uo7XAHcCdehTwiPoFZw3apr21bOc/xdgO3qX924gA6hHD0OvRg8FYZyKTU3TopqmfRw9vP4v9CDVEj+nCT1I/iDwlnN4izMSD85vRA8uX0IPZTXgEHp4fammab75HpcQQgghxFSohdmgUgghhBBCiMVBKfUcemj515qm/SzNwxFCCCGEOC9IqCmEEEIIIcQMKaV2AS+iV39WaZrWnOYhCSGEEEKcF0zpHoAQQgghhBALmVLqY0A++jqVDZqmRZVSDuCtwPfih/1GAk0hhBBCiPkjlZpCCCGEEEJMQin1L8CX4k+j6GuguhhZX/QQcG18TVIhhBBCCDEPpFJTCCGEEEKIyf0KvbHPFeiNnHLRO5cfQ28e9CNN04bSNzwhhBBCiPOPVGoKIYQQQgghhBBCCCEWFcPZDxFCCCGEEEIIIYQQQoiFQ0JNIYQQQgghhBBCCCHEoiKhphBCCCGEEEIIIYQQYlGRUFMIIYQQQgghhBBCCLGoSKgphBBCCCGEEEIIIYRYVEzpHsD5QilVD2QBDWkeihBCCCGEEEIIIYQQC0EV0K9pWvV0T5RQc/5kZWZm5q5duzY33QOZbT6fDwCn05nmkQhxfpHPnhDpI58/IdJDPntCpI98/oRIj6X+2Tt+/DhDQ0MzOldCzfnTsHbt2tz9+/enexyz7umnnwZg9+7daR2HEOcb+ewJkT7y+RMiPeSzJ0T6yOdPiPRY6p+9bdu2ceDAgYaZnCtragohhBBCCCGEEEIIIRYVCTWFEEIIIYQQQgghhBCLioSaQgghhBBCCCGEEEKIRUVCTSGEEEIIIYQQQgghxKIioaYQ
QgghhBBCCCGEEGJRkVBTCCGEEEIIIYQQQgixqEioKYQQQgghhBBCCCGEWFQk1BRCCCGEEEIIIYQQQiwqEmoKIYQQQgghhBBCCCEWFQk1hRBCCCGEEEIIIYQQi4qEmkIIIYQQQgghhBBCiEVFQk0hhBBCCCGEEEIIIcSiIqGmEEIIIYQQQgghhBBiUZFQUwghhBBCCCGEEEIIsahIqCmEEEIIIYQQQgghhFhUJNQUQgghhBBCCCGEEEIsKhJqCiGEEEIIIYQQQgghFhUJNcehlLpDKaWNurWne1xCCCGEEEIIIYQQQggwpXsAC9hJYHfS82iaxiGEEEIIIYQQQgghhEgioebEIpqmSXWmEEIIIYQQQgghhBALzKKcfq6UertS6gdKqeeUUv3x6eH/dZZzypRS/6GUalVKBZVSDUqpf1NK5UxwyjKl1BmlVL1S6ldKqWVz8FaEEEIIIYQQQgghhBDTtFgrNb8MbAb8QAuwZrKDlVLLgReBQuAPwAngQuA24Dql1CWapnUnnbIX+FD8uML4672olFo/6jghhBBCCCGEEEIIIcQ8W6yh5t+ih5mngCuAp85y/L3o4eSnNU37wfBGpdR349f6GnDL8HZN0/6cfLJS6iWgDvgg8N1ZGL8QQgghhBBCCCGEEGKGFuX0c03TntI0rVbTNO1sx8anjb8OaADuGbX7dmAAeL9Syj7J6/mBo8DKGQ9aCCGEEEIIIYQQQggxKxZrpeZ0XBW/f1TTtFjyDk3TfEqpF9BDz53AE+NdQCllRZ/ifraKUJRS+yfYtcbn8/H0009PddyLhs/nA1iS702IhUw+e0Kkj3z+hEgP+ewJkT7y+RMiPZb6Z2/4/c3EoqzUnKbV8fuaCfbXxu9XDW9QSn1bKXWFUqpaKXUR8L+AHfj53A1TaNoZYtrjxLQHiWmPo2ln0j0kIYQQQgghhBBCCLEAnQ+Vmtnx+74J9g9vdyVtKwN+CeQDncBLwE5N0xrP9mKapm0bb7tSar/T6dy6e/fuKQx5cRn+bcFM31tPzwvUN9yN1/tyynYNcGVfSHXVJ8nNveTcBinEEnSunz0hxMzJ50+I9JDPnhDpI58/IdJjqX/2nE7njM89H0LNs1Hx+8T6nJqmvTtNYznvtLb+huMnvgTExt3v9b7MwUMfYu2ar1NS8o4pX9fvr6G390UiET8mk4OcnItxOFad/UQhhBBCCCGEEEIIseCdD6HmcCVm9gT7s0YdJ+ZJT88LkwaaI2IcP/FFrNaSs1ZsTlT1CeBySdWnEEIIIYQQQgghxFJwPoSaJ+P3E5XpDXc0n2jNTTFH6hvu5uyB5rAY9Q33TBpIzlXV53ikElQIIYQQQgghhBAifc6HUHO4Y/nrlFKG5A7oSikncAkwhL5uppgnfn/NuNWUk/F69+L314wbHs5F1ed40lkJKkGqEEIIIYQQQgghhG7Jh5qapp1WSj0KvA74BPCDpN1fQe9qfp+maQPpGN/5qrf3xRmd99rRz+DK3orFko/FUhC/z+PU6W8zm1Wf45nPStBkMqVeCCGEEEIIIYQQItWiDDWVUm8B3hJ/WhS/36WU+ln8cZemaZ9LOuXjwIvAXUqpq4HjwEXAlejTzr80x0MWo0Qi/hmdNzBwkoGBk2c/8Cy83r14vQfJzr4ApdRZj5+vStDRphqkLqu+DZPJMWEVp1R5CiGEEEIIIYQQYilZlKEmcAHwwVHblsVvAI1AItSMV2tuB/4ZuA64HmgD7gK+omlaz1wMMj69fbg3vTkWm2ol4eLSFmrjZOAkJw+fxGFxcFHRRazIWTHpOSaTY55GN7H9B96OUiZMpmwsllzM5hzMZlf8Phez2YUl/vh03XeZ60rQ0aYTpNbVf2/MVqdzAxaLm4GBEwQCZ8bslypPIYQQQgghhBBCLFaLMtTUNO0O4I5pntMM/PVcjGcSnwVuH37i8Xjm+eXn1kttL/Gjwz9iv2e/vqF3ZN829zZu2XwLO4t3jntuTs7FM3rNFSu+iMFgJhTsJBTqIhTuxuc7SjDYPqPraVqEcLibcLh7RudPxOvdS03t17BlVmE02jCabBiNdozGTIxGOyajTd9utGMwWMetFp1eI6WxfL7XgNcmGePcTJefjFSMCiGEEEIIIYQQYjYsylBzEfkOcH/88SNut3tjOgczm35b+1u+sucrxLTxQ7f9nv3c/NjN3LHrDm5aedOY/Q7HKlyuC6fVLMjluojKir8Zs725+WfU1H516oOPU8qCpoWmfd5UNTf/x1RHEg84R4JOUPj9R+dsbCNmd7r8RGRdUCGEEEIIIYQQQswmCTXnkKZpPsAHoJQKGwyGNI9odrzU9tKkgeawmBbjjj13UOwoHrdis7rqkxw89CGmVo1ooLrqE+PumWnV54U7/kBmZiXhSC/hsJdwqIdwxEs41Es43KNvC/fS13+IoaHGGb3G1GhEowNEo+nqVTU70+Unkq4GS0IIIYQQQgghhFi6JNQU0/ajwz86a6A5LKbFuO/wfeOGmrm5l7B2zdemsG6kgbVrvj5h6DbTqs/hac9GYxHWjKIJj51pJWiOayc2WzXR6CCR6ADR6CDR6FA8wByM3waIxYLTvvZs83r34vfXzPpU8HQ1WBJz61TvKfa278Uf8k95HV0hhBBCCCGEEGI2SagppuVU76mRNTSnaJ9nH8+feZ4VrhUYlRGjwYhRGTEZTOQWvolNGcU0Nd47wdTki6iu+sRZg67Zqvocz0wrQVetun1KIaGmRZNCTj0AbWv9LS1nfj6j152p/fvfhdVajMmUhcnkjN+yMJkc42xLfTw764LObcWoOHdj1tFNcrZ1dIUQQgghhBBCiNkkoaaYlr3te2d03q2P33rWY0rMNlZbNTKNENZMNIYz6elqw3j6dkwGUyIINRqMmJQpJSA1G8ysMK5muzqOUqAByRHb8HMNRUPGpRyqewlj/Ssj1zIYU18jfm2TMmEymLBmrMAQPDXl92t1bKI3lkm/vzVxzdHXNRqMGJQBpYyJkHCYodQy76FmJNqPf6B/RufqXeRTA1BQ06qehbmrGBXn7lzX0RVCCCGEEEIIIWaThJpiWvwh/5xduzUMreHhKDIK+OO3qdkLvJyRweuzwqywpgYvCjgVMPCXfjO1wX3AvmmNbWVGlFsLwDC2GHGMmAbfqTtJ7fHrz3qsQRnGBKnDz9+fZaXcHJjWONNF7yLfQzjcc87X6u19UULNBWa21tEVQgghhBBCCCFmi4SaYlocFseMzsu2ZGM1WYlqUaKxKBEtQiQWIRqL6tu06KyMrzZopLbTSJEpxiprlAwDBGNQEzDSHpl5o6baoJFf91p4V04IgwJNg+TZ1sPPYxr8utdCbdA4pevGtBgxLUY4Fh6z749RbcpB6rkYHvsPOyz4YwYyDRpWpZFpgEyDfm8db5vSEo9NszjG/zhyL/v2/d+YkHe8StfEvtHPJzt2eN+oatzkY4b3mQ3mseNQUz82+X686fmLxWytoyuEEEIIIYQQQswWCTXnkFLKCQzPKTbHYlNdW3Dhuqjoohmd97PrfjZpIxFN04hqUT3
oTLofDj3DsXDicfK+RDh6lmOviumPI1pqkBqJRcZcL/nc0fv2xNpZpdVQoHpTxq8UtEYdvBJy02WyUZU19vVSrhXfN5mzBamzRSm9ivVkcObfDkxoWA1gM2hYDRqZCrbYIux0TD+s7gj00+AfmvFYFqqpBKmTLa8w/Dg5qO3u6saAgadefCpx/YlC3cmWV5gsAG7zt81oHd1TvaekeZAQQgghhBBCiDkjoebc+ixw+/ATj8eTxqHMjhU5K9jm3jatkGO7e/tZww2lVCJgWQz8/hp6e18kEvFjMjnIybkYh2MV75/GNTRNI6bFRgLUCQJQn/dletv/i6D/1Tl5LxqKiOs63pXrHhO6JgLgWJSwFk55Prx/dBAdiUXp1SIcDA2xE8+Uw9jh42oCU6tyXWwSFclz8LuNl2unt3bpfNjbvldCTSGEEEIIIYQQc2ZxJEiL13eA++OPH3G73RvTOZjZcsvmW7j5sZunNB3VoAzcvPnmeRjV/HI4Vp3zuo9KKb1CDiMWo2XiA7MqoOLtY4LUcMRHff1dnFtKZmDdmq9zTck7zuEaE9t/4D1TbhakFNicF/Af2/9tJDAddT+l6trpHDu8b3SV8FleM6qNXUJhvErc2V5eYTGZy/V3hRBCCCGEEEIICTXnkKZpPsAHoJQKGwwzX9NxIdlZvJPbd91+1sYhBmXgjl13yNp6s2S8INWVvZX6hnvweqffld7luojqqk+Qm3vJbA1xjOqqT3Lw0IeYWvBqYPXyvyM3q3zOxpMuky2vMFGV7lSWYnjt6GtEibJy1cpxg9TkYyerrh1vKYfhY9sG2jjjPzPt9zzT9XeFEEIIIYQQQoipkFBTzMhbV76VEkcJ9x2+j32esZ3Et7u3c/PmmyXQnGO5uZeQm3tJShVnKNRJX/9hfL6x09Wt1jLy8nZTVvpX89JhPDf3Etau+RrHT3yJyYNNA2vXfH1OA9Z0mqvlFTIbMwHYvWr3rF432aneU9z0x5umfd5M198VQgghhBBCCCGmQkJNMWM7i3eys3gnv3z0l5wMnKS4shiHxcFFRRfJWnrzbLwqzonW/ZxvJSXvxGotnbCidD4qRsXMzWQd3bW5a8/r7wF33HFHyr0QQgghhBBCiNknoaY4Z8WWYootxezevDvdQxFJZmPdz9kyXFHa13eQffvfDoBSZi7c8ccFM0YxsemsowtQ21vLH0//kTctf9Mcj0wIIYQQQgghxPlqaSzyKIRYFLKyNjP8bUfTwths1ekdkJiS4XV0DWpq/2REtAhfev5LfPPlbxKJReZ4dEIIIYQQQgghzkcSagoh5o1SBkymrMTzSKQvjaMR0/HWlW/lvmvvY7t7+7j7t7u387VLvsYK18i08/86/l/c8tgt9AZ652uYQgghhBBCCCHOEzL9XMyujuNQ9wwEfZDhhGVXQOHadI9KLCBmcxaRiBeAcLgfiyU/vQMSUza8ju6p3lPsbd+LP+Qfs47u1ZVX86Xnv8QTTU8AsLd9L+/+07v5/lXfZ03umnQOf160t7cnHgcCAaxWaxpHI4QQQgghhBBLl4SaYla4eg/DT/8VGl8Yu7PyErji87Bs97yPSyw8JlN24rFUai5OK3JWTNgIyG62893d3+Xfj/w7dx+6G4DWgVbe//D7+edL/pk3VL9hPoc65yZrBnTnnXeO2Xb99dfjcrkSN4vFMoejE0IIIYQQQoilS0LNOaSUcgLO+FNzLDa1JhuLTVHbY6w+eQ+gjX9A4wvwnzfBjXfB1vfP69gWnfOg0tWcFGqGJdRckgzKwM2bb2Z17mq+8NwXGAgPEIgG+Pyzn+d4z3Fu23IbRoMx3cNMi4cffjjluc1mIycnJyXodLlc5OTkkJ2djdlsTtNIhRBCCCGEEGJhk1Bzbn0WuH34icfjSeNQ5kjd06w+eQ9qokBzmBaDBz8NrnKp2BxP3dPwzPlR6WoyJ62pGe5P40jEXNtdvpsHbniA2568jYb+BgB++tpPqemp4ZuXf5PsjOzJL7AIjK7UfO6553jiCX3qvdFoJBqNTnr+4OAgg4ODnDlzZtz9DocjEXKODj6zs7MxmeSfcSGEEEIIIcT5Sf43NLe+A9wff/yI2+3emM7BzIln/vXsgeYwLQbPfGvJhHOz5sAv4MHb9K/PeJZYpatUap5flmUv44EbHuALz32BZ1ueBeCF1hd4z0Pv4a4r75pwGvtiddlllyVCzS996UsMDAzg9Xrxer309vYmHg/fzlbB7/f78fv9tLS0jLs/KysrpbozOfTMysrCaDw/K2KFEEIIIYQQS5+EmnNI0zQf4ANQSoUNhiXWbL7jODS+gAaoqZ7T+Dw0vqhPqTaYwGCO3xtBTfkqS0fd05MHmsOWUKWryZy8puYiqtQ8D5YGmCtOi5MfXPUD7jl0D/cf0X/P0+xr5r0Pv5evX/p1rqm8Js0jnBsGgwGn04nT6aS8vHzM/lgshs/nSwk5k4PPvr4+NG3yXxr19/fT399PU1PTmH1KKbKzs8dMax9+7HQ6WXL/LgkhhBBCCCHOGxJqipmrewaYRqA57KcTNAoxmJKCTqP+2GhO2h6/GU1jjx3vuMT5xqTwNPn8pEB13NdJ2j9b5xvNqeHtM/969kBz2BKpdDWbkqefL4JKzfNoaYC5ZFAGPrXlU6zJXcOXnv8SQ5EhhiJD/O3Tf8vNm27m4xd8HIM6vwI2g8FAdnY22dnZVFZWjtkfjUbp7+8fU905HHz290/+SwFN0xLnTPb6E63pabfbJfQ8z5zqPcXe9r34Q34cFgcXFV205KqphRBCCCHE0iGhppi5oG92rxeL6DcCs3vdhUYZ9IBTGSEyNL1zG5+HX78fnEVgygBTZvzeCmarfp9yywBz0jGjtxtM814ha0qZfr7AKzXTvTTAEqwOvbbyWqqyqvj0k5+mxa9Pqb7vyH2c6DnBNy77Bk6L8yxXOH8YjUZycnLIyckZd38kEqG/v3/cae29vb34/f5Jrx+Lxejt7aW3t3fC159oarvL5cJut6POxwr7Jeiltpf40eEfsd+zf8y+be5t3LL5FnYW70zDyIQQQgghhJiYhJpi5jJmGD6YbXrl4nCIGQtPvVpxKdBiEA3N/Pzjf5y9sSjD9ELQ8banBKpnD1pNZCRePrKQ19RM59IAS7w6dGXOSn71xl/x+Wc/z4utLwLwTMszvPeh93LXVXdRnV2dOPaeW54E4BM/uiotY52J0c2D5orJZCI3N5fc3Nxx94fDYfr6+iZcz3NgYGDS60ejUbq7u+nu7h53v9lsnnBqu8vlIjMzU0LPReC3tb/lK3u+QmyC73X7Pfu5+bGbuWPXHdy08qZ5Hp0QQgghhBATk1BTzNyyKwCmt6YmwEefHFttFoslhZyjbtHwONsiqaHomG2TnJty7PD+6ASvk7Q/8Trh+POpnp90biwyW1/92aHFIDyo3+aJ2WWGTXq1ZvjUn+HPy2ceqE5p+6ig1WSFqUypTdfSANOpDmXsOo0pFnClZ3ZGNvdefS/fP/B9fnr0pwA09Dfw3ofey52X3ckV5VekeYQzF4tpfOpXB6nOs7
PS7WCV28myAjsZpvlt2mM2m8nPzyc/P3/c/aFQaMKp7V6vl6GhySvJw+EwnZ2ddHZ2jrvfYrFMOLXd5XJhtVrP+T2Kc/NS20uTBprDYlqMO/bcQbGjWCo2hRBCCCHEgiGhppi5wrVQeQlqvGqyiVReOn6oYjCAwQJYZm14C5KmjQSie++Dx/9p+tdYfxOU74RIIOkWhPCQfj+d7Vp09t/jWZgiI/95jhg1GOya9zFgtIwThCY9j0agec/0rtn4PHiOgXvdzMc1zepQ16Y78OZsHv86i6DS02gw8nfb/441uWu4/cXbCUQD+MN+PvXkp/jEBZ/go5s+mu4hzsgZ7xAPHWlL2WY0KCrzbKwqdLLK7WCl28kqt5PqfDsWU3rWrbRYLBQWFlJYWDju/kAgMCb0TA4+g8HgpNcPhUJ4PB48Hs+4+61W64RT210uFxkZGeOeJ2bPjw7/6KyB5rCYFuO+w/dJqCmEEEIIIRYMCTXFubni82i/eAuKyTv0AvpU5yv+fu7HtJAppTcaMppg1etmFmpe8Q+zV20XjYyEnJGk8DMcGBWCnkNwOmq7mZGp9xFTmqamRkP6LTjLa3r+6BKw5UFmzvRuGVl6sD/N6tCqht9waHSome51QGfg+mXXU51dzW1P3UbbQBsaGncfupsT3SdYwY3pHt601XaMXW84GtOo6xygrnOAR46ObDcZFFX5dj3oLNSDzlVuB1X5dszG9DbpsVqtFBUVUVRUNO7+oaGhCae29/b2Eg6HJ71+IBCgvb2d9vb2cffbbLZJ1/Q0m83n/B7PF5qmEdEihKNhwrEwoWiIGm/NuGtoTmafZx+nek9J8yAhhBBCCLEgSKgpzs2y3Zxc/QlWn7xn8mBTGfQQZQFUhy0Y8UrXcavpJjJRpetMGU1gdECGY/aueRamcB88txWAsD0LPvfKBIHq6KD1XALVUdeaK1oMBjr123QoA1gc0w5ZXX2vYRtoGtmQznVAz9HavLX86o2/4nPPfI5X2l8BwPrI6sT+Ey+14a7KwlVo43Tf6QXdoXlDSTbfe9dmajx+aj0+ajx+mnsH0cb5FhmJaZzq8HOqww+MhHtmo6I6365XdCZVd1bl2TClOewclpmZSWZmJsXFxWP2aZrG4ODguNPah2+RyOTLcQwODjI4OEhra+u4+x0Ox4RT27OzszGZ0vMjTjQW1YPDWIhQNEQ4qj8evg9FQ4lgcfh+eH/y81BUv0VikZTnoVh8W9LzcCysXz/p3MS2+HNtKr98nIK97XsX1OdNCCGEEEKcvyTUFOesvfhaAtZCLvA+qk/BHa3yUr1Cc4GEJwvKFZ/Xq+amUp23RCpdTSYn+iqsGtHoADFbDgZDwfwNQNMmDjuHA9Wjv4P9P53HMcVmXDVaVf8A1BRBbjU8fWd61gGdJb/8zCG28T628b4x+5742fFRW4p5ZPW/0+FoYtBy54Lq0FyYZeWmLWUp2wZDEU53DFDj8VHT4aPW46fG46Old/x1K8NRjRqPnxqPn4cYmcpuMRpYVjAcdjpYVaRXd1bk2jAaFk5THqUUdrsdu91OaWnpmP2apuH3+yec2t7X10c0OvnyGH6/H7/fT0tLy7j7rXYrNqeNDEcGGc4MTDYTJpsJZVeQAREiUwoaJwok+/x9RLQIxt8YU7ZH07Csx3zyh/zpHoIQQgghhBCAhJpzSinlBIZbhJtjsaXb4dubsxluum1BNyZZkJbthhu/f/bquiVU6aqUAZPJSSSih3iRSD8Wy/jdm+doAPr6meZJmpQ4CmcWan7sGXAWwVDv2W+DPTDk1R+Hxk5XnqrCrj3wwDtmdnLj8/pndpF+Rq87qa+56bf04nE0cu/BX9NwUQdvvfh6LNb5+eet5QvPAVB252WTHmezmNhYls3GsuyU7QPBCKc69ICztsPPyXYftR4frX3jVxSHojFOtPs40Z76dybDZGB5gSNlvc5VbgflOTYM5xh2appGJBYZW0U4TnXhlKoSx9kWjoUJ2UOEMkOE3fo2LaBBAAwBA6aACXPIjCVkwRqyYo1YMTB5xWpgIEBgYPyvY4wYQ6YhBk2DDJgGUu/NAwwZh6beAW/yfkoLhkEZsBgsmA1mzEYz4VgY3wy+9zgs81fZL4QQQgghxGQk1JxbnwVuH34yUbOEJaVw7aINSNJm6wfAVaFXzZ0nla4mU3ZSqNk3v6HmVMx0aYCSC/THzvHXIJxQNAwvfB+e/Or0zpsNdc8smM/sJ350VcrzA42H2fONbgAacl7D7askM+Icc54jlIOjJ4flPRfQ3Qj//ptnyS2x467KorAqC3dVFrmldowLZNp2MnuGic3lLjaXuwAS4WHP4CAnPb3Udng53eWlrqufxp5+ugcHUSoCKgIqGn8cJaYi1A5FqW2MoJpH9puMUVw2A1k2hcMKtgzItGgYjTHCsalNbw7HwrM2dXlGjIA9fotTmiIzkoktYsMesafc28I2bFEbapJU0oABe8SOPWKngLGV4jFiDJoGx4Sdw88DxsCkoafFYMFiHAkQh58nthnMKc8tBot+nNGCxWAh+Ge9CZP7JnciiEw5Puk+8VpGc+Ja472WyZD6I9+p3lPc9MebpvMnAcBFRRdN+xwhhBBCCCHmgoSac+s7wP3xx4+43e6N6RyMWMCW7dZv50mlq9mcRSBeQBWOzHKzntkyn0sDGM2w5oYZhZod+bsodJig7fDMprDPdrOkWbS1cjN7eBKAR9b8O2h6gOn2VVLgr6TQX0HBQDnm2Ngu2T2tA/S0DnD8RX3qtslsoKDCmQg5CyodZOQYiGiRCaceD6+HmLw+YmKtwvhx16KvLfjd/d8965qGE62bmHzchJ2oTUBhSq43ZT7AFwUG4rclQFMag+ZBQhkhBg2DY8K+DJWBLWrDGrZiDVmxhC2Yg2ZMAROGoAEVnLwM04ABR8SBIzJ+VaLBaMBkNmGxWqiorCDHlUNOTg75ufnk5eThcDhQauYVsnf8+Q4Abt1864yvcTYrclawzb1tWs2Ctru3y3qaQgghhBBiwZBQcw5pmuZD//8kSqmwwbDwqoTEAnOeVLqaTCNTcCPhvjSOZBLzvTTADKpDvdkbOLbhCxTu3g0v/Qge+Yfpv+6eeyHQB+veAqXb9C7sC5UCf0Yv/oxeTucf0jdpBnIGi3APh5z+SnIHi8dMTY6EY7Sd7qPt9MjftyGTnw5HIx2OJjocjXQ6mgiYp5f6Xcu9APz0tXlcgzUNjMo4pjowURmY9Dz5ftyqwnEqEyerLkxce7jiMaky0WQwYVAz+/saDofp6+sbdz1Pr9fLwMDkfw9i0ZgeTAdCHPMeG7PfZDJN2LU9JyeHzMzMcwo9Z8stm2/h5sdunjhMH+V9a8eudyuEEEIIIUS6SKgphJh35qRQMxxZoKEmzP/SANOsDm2oeufI82VXzOw1A72w5279llUKa98E694M5Rct7IAzTlMxeuyt9NhbOe7eA4ApaiF/oCwedOphpzOYN+bczIiDSu96Kr3rE9v6Mrr0oNPZSIejkS7bGaLG8Liv/fdnPpR4/Nbuq2nKaKMpo51OUy+am
tl07dHrHiaHesNTjCcKDMcLGCNRA96BGN0DMbr7o3j6I3i8EfoDGmgm0ExomlF/HEt6rBnR4vvRjDgzLJS6HawqdLLS7Yiv2enEnZWxIMK56TKbzeTn55Ofnz/u/lAoRF9f35iu7cPPh4YmX0gzEonQ1dVFV1fXuPstFsuYju3Jt/mys3gnt++6na/s+cqUgs37X72fHcU7yLJkzcPohBBCCCGEmJyEmkKIeWcyj/yHOBJeuNOfgfldGmCa1aHe/vKRbTNZB1QZIblTc/8Z2PtD/eYognXxgLNiFxiM03476RIxhmjPqqM9qy6xLTPkpGCgArdPDzkL/ZVkRG1jzs0O5pMdzGdl9zZAD00D2X2E8vr4QPfKCV/zox1vG7PNuypCOFcRyzdCvgVjjgWzafLKxNHrHs6Vbn+QGo+f2g6f3pHd46fW46N3cPwA1xeMcLDJy8Emb8r2LKuJVW5nvDmRI/7YQYFjcYadwywWCwUFBRQUjF1vEyAYDPLYY48RCAQoKysbE3wGg8FJrx8Khejo6KCjo2PS4x588EGKiopwu9243W4yMsYutXCu3rryrZQ4Srjv8H3s8+wbs78qq4qG/gYAjnUf49bHbuW+a++ThkFCCCGEECLtJNQUQsw7LTYSpHV1P43LtQOHY1UaRzQFc7g0QLC2loE9LxEb8GOwO7Bffg8ZDf999urQp59O3TfddUDf+xv98bHfw4k/6Z3Yh/nb4eX79Zu9ANbeqAeclZeCcf7+6XDd1sadL9857fM+uO6D3Lj8xtQmLUkVjEZlpL8zgKehH09DPx0N/XQ2+4hFUisslWYg05tDpjcHXNMce03y1ymEMkcwFWiYC42Y3AbMhRZMhTZMuZko4/wGgHmODHY5Mti1fKSCVdM0Ov1Baj3+lKCzxuOjPxAZ9zr9gQj7GnvZ19ibst1lM6dUdQ7f5ztmP5RLh4yMDBwOBw6Hg507d47ZPzQ0NOHUdq/XSygUmtLr7N8/dr3LNWvW4Ha7E2Gny+XiXJe32Vm8k53FOznVe4q97Xvxh/w4LA4uKrqIFTkr+N+a/+Ure74CwJGuI3z8iY/zo2t+hM089hcDQgghhBBCzBcJNYUQ86an5wXqG+7G6305sa27+0m6u5/E5bqQ6qpPkpt7SRpHOL8G9uyh6557Gdw3tjrKtn07+X91P/ac3qlXh053HdCV1+jPV14Db/weNDwPx/4Axx+EwaRpswOdsO8/9JstD9a8UQ84qy/XmxzNoZl2Wn7LirectaGJy23D5bax+iK9W300EqP7jB9P/UjQ2ds+mDj+D97UKsYMBddl6++/WYMcqxFbVMMQGf9rr4VjhFsHCLeOWq/RpDDn2zC5bZgLbZgKbZjdNkx5VtQ8dmxXSlHotFLotHLJipFp2Zqm0eELjgk6az1+fMHxw07vYJiXG3p4uaEnZXuu3cLKwuHp6454haeTXLtlTt/bfMvMzCQzM5Pi4uIx+zRNY2hoKBF0/s///M+0rn3ixAlOnDiReG6xWBKVnMNhZ2Fh4YyqOlfkrBj3c/P2VW8nHAvz9b1fB+Bgx0E++eQnuefqe8g0ZU77dYQQQgghhJgNEmoKIeZFa+tvOH7iS8D4gY/X+zIHD32ItWu+TknJO+Z3cGng/d//pe2fbofY+F+PwX37aDpwgOKv/jOut90y9QvPdB1QoxmWX6nfrv82NL04EnD6PUkD64YDP9dvVtdIwLlsN5hmP5iazw7NRpOBwsosCiuz2BjfFhyK0NGoB5yeev022K9X2QWTijoP9IWhTw89LQqcRoXbZSHfYcZhUJiHIjA0fgBIRCPcPkC4fYCUlRoNClN+ph5wxoNOc6ENU34myjS/Yac7y4o7y8plK0emY2uaRnt/ICXoHH48EIqOe62egRB763vYW58aduY7LKwsTA06V7kduGxLK+wE/etps9mw2WyUlpayfv36lP2apvGVr+hVkddeey3t7e14PB66urqIjfP9IhQK0dzcTHNzc8r2nJyclKnrRUVFuFyuGS8L8J417yEcDfOtfd8C4JX2V/j0k5/m7qvvJsO4NCpwhRBCCCHE4iKhphBizvX0vDBpoDkixvETX8RqLVnSFZsDe/ZMGmgmxGK0/b9/wlxSgn3Xrqm/wLmuA2o06VWY1ZfDG/4VmvfqAeexP4CvbeS4gBcO/Zd+y8iGNdfHA84rwWyd+njPYjodmg3KwM2bb561187INFG+JpfyNbmAHjgNeIOJSk72to05J6RBd0SjuysIXSNrK2aYFOVFNgpzMnBZjFjDUTRvkFj/BFORYxqRjkEiHYOp2w1gysvUg87hqs5CG+aCTJR5/tY+VUpRnJ1JcXYmV6xKDTtb+wLxas6RoLO2w8/gBGFnlz9El7+bPXXdKdsLnBl60FnoTKnuzM6c2wrhdEoOHS+5ZOT7YCQSobOzE4/Hg8fjSYSdg4OD412G3t5eent7OX78eGJbRkYGhYWFKWGn2+3GYplaePyB9R8gokX43v7vAfBS20t85qnP8P0rv4/FuPQCaCGEEEIIsbBJqCmEmHP1DXdz9kBzWIz6hnuWdKjZdc+9Zw80h8VidN37w+mFmsNmYx1QgxEqL9Zvr/8GtLwyEnD2t4wcF+yDw7/UbxYnrL5ODzhXXAPmc5ueOtUOzQZl4I5dd7CzeOwah7NFKYUjx4ojx8ryLYW0xEPNd//ThXo1Z4MPT30f3WcG0GKp63MGIxqnWgY41TIy/TzDZqK4wklJgZUcmxm7poE3SMQzSLRvgmYzMYh0DhHpHCJwNCkEVGDKtY5MXy8cmc5usMxv2FnqyqTUlcmVqwtHhh3TOOMdijcn8lPT7qOmw8epDj+B8Ph/rp2+IJ2+IC+cSg073VkZ+lqdhU5WF+lB58pCB07r0g07TSYTxcXFKVPaNU3D7/cnAs7hsLOrqwtN08ZcIxgMjlvVmZubm7JO5/BaneNVdX54w4cJR8PcfehuAJ4/8zyfffqzfHf3dzHP8XIUQgghhBBCJJNQUwgxp/z+mpQ1NKfC692L31+z8JsHzUCwtnbcNTQnM/jKKwRra8lYOXH37XlhMEDFRfrt9V+DMwf0JkPHfg/eppHjQj549X/0m9kOq16vB5wrrwWLfUYvfbYOzdvd27l5881zGmhOJq/EQV6Jg7UX688joSidzf540Knf+juHxpwXHIzQcKKXhpElEnHmWimsyqJocyEFLjNOg4FYT4BIxyDhjkGiPYHxB6FBpDtApDtA4Hjq9G5jToYecLrtSdWdmRgy5u/HAINBUZ5rozzXxlVr3IntsZhGc++gHnQmVXee7vQTnGB9Uk9/EE9/kOdqu1K2l2RbE53Yh6exryx0YJ/H9zmflFI4nU6cTicrk74/hMNhurq6xoSdQ0Nj/w4C9PT00NPTM6aqM3nqutvtprCwEIvFws2bbyYUC3H/kfsBeLrlaf7huX/gXy//V0yGpfm1FkIIIYQQC4/85CmEmFO9vS/O+LylGGoO7HlpxuelPdRMphSUbdNv1/4ztB3SqzeP/h5660eOCw/A0d/qN1OmHmyue7MedGY4
p/Za8Wn0O4M+drou4tTK97A31DmmQ3M6lN152bjbTRYjxcuzKV6endg25A/R0eBLTF331PcTGAiPOdfXE8DXE+D0gQ5A/1LnljhwVzkp3FVCYZkdp9FAtGtIDzo9+hT1SE8AxhbnARDtDRLtDcLJ1C7lxuwMvUGRYwhz7DQmay/mXCOGNZede5XvFBkMiso8O5V5dq5dNxJ2RmMaTT2DKUFnjcdHXecAoej4YWdrX4DWvgDP1HSmbC91ZbIq0YldDz1XFDqwWZbmj0Fms3ncqk6fzzcm6Ozu7p6wqrOpqYmmpqaU7Xl5ebjdbja5N/G+/Pfxf+3/x5BxiMcaH+OLz32Rb1z2DYyG+asMFkIIIYQQ56+l+dO8EGLBiET883reQhcbmNn7mul580IpKNmi366+HTyvjQSc3bUjx0WG4Pgf9ZvJqk9NHw44rdljr1v3NDzzr9D4QsrmFcCKykvgis+PbXi0gGU6LFRuyKNyQx6gh0z9XYFENWdHQz8dTT6io6Ziaxp0n/HTfcbPsRf06e4mi4GCCifuqiwK1+fjviELR5aZSFcgEXSG4+txRrqHJlz9IdoXJNoXRJ/oXhq/geHhk5htr2BeVolp5arENHajff6mFxsNiup8O9X5dl6/viixPRKN0dgzmBJ01nr81HX5CUfHT3XPeIc44x3iqZMjYadSUJaTyarCkaBzldvJikIH1nlcm3Q8d9xxx6xfUylFVlYWWVlZrFo18gujcDhMZ2fnmLAzEBi/Iri7u5vu7m6OHTsGwPVcT8gQos/SR2t3K7e33c7Nl9xMUVERZrNMRxdCCCGEEHNHQs05pJRyAsOlSObxupYKsdSZTI55PW+hM9hn9r5met68UwqKNuq3K7+kV1kOr8HZOTK1lUgATvxJvxktsPwqPeBc/QbIzIEDv4AHb4OJ1tBsfAH+8ya48S7Y+v75eW+zTClFdkEm2QWZrNyhVyhGozF6zgyMVHM29NPTNjCmAjMSitF2qo+2U32JbZlOsx5yVmXp95eWYrWb0SIxIl1DhJOqOsOeQSKdA6CN3wk7Rh7BwTyCrwGvnUpsNzjM8WnsI+t1mt02DHbzjLtqT5fJaGB5gYPlBQ6u2zCyPRyN0dg9kBJ01nh81HcNEImNDTs1DZp7hmjuGeKJEx2J7UpBRa4t0Y1dr+7UXy/dYedcMJvNlJSUUFJSktimaRr9/f0pDYk8Hs+EVZ2WmIWCQAEFgQLoh5+c+AlKKXJzc8d0YM/Kypq3vytCCCGEEGJpk1Bzbn0WuH34icfjSeNQhEiPnJyLZ3Sep+PPZGdvJStr04TH+P019Pa+SCTix2RykJNz8YKfsm7fNbM1H2d6XlopBe51+u3Kf4TOk3Dsj/oanJ7XRo6LhqDmEf1mMOuBaOtBJpxLPUyLwYOfBlf5oqrYnIzRqFdgFlQ44XK9ajIUiNDZ6EsJOv29Y5sIDfnCNLzaTcOrI011sgszU4LO/LW5mMxGqHsa7RdvJxJzE9YqiGgVhGPl+r1WBoxfYRfzhwn6+wjW9aVsN9hMiYBzJPS0Y3DOX9hpNhpYUehkRaGT6zeOTLsORWI0dA9Qk9SJvcbjo6F7kOgEYWdj9yCN3YM8fnzk322Dgso8OysL9aAz0h2h1GkgGImSYVpaYadSiuzsbLKzs1OqOkOhUKIDe3LYOV5Vp6ZpiarOo0ePJrZbrdYxTYkKCwulqlMIIYQQQkybhJpz6zvA/fHHj7jd7o3pHIwQ6eBwrMLlunDazYL6+vbxyr6byM+/mmXVt+F0rk/s6+l5gfqGu8e9pst1IdVVn1yw3dMzVq7Etn37tJoF2XbsWFjrac5UwWq44u/1W9cpOB6v4Gw7PHJMLAytB6Z+TS0Gz3xryYSa47FYTZSuzqF0dU5i20BfEE99f8rU9VAgOubcvo4h+jqGqHlZD+cMRkV+mYPCof24g5dSaK4lx7gHpUbWvtU0AxGtKB5wlhOJVRA2ryUcLYEJGvfEBiOEGvoJNfSnbFdW00jQmdSV3Zhtmbew02IysCreNChZMBKlrnMgpaqztsNPY/cA42SdxDSo7xqgvmuAR4+NhJ137PkLVXm2lPU6V7mdVOXZsZgMc/325pXFYqG0tJTS0tLENk3T6Ovro93Tzn+//N+0tbeRHcrGEXagGPtnHAgEaGxspLGxMbFNKZVYqzM57JSqTiGEEEIIMRk13jQiMfuUUvu3bt26df/+/ekeyqx7+umnAdi9e3daxyEWrp6eFzh46ENMuLDfFBQUvI7q6tvw9R/h+IkvneVaBtau+TolJe+Y8evNpYE9e2j6m4/AVJakMBio+MmPse/aNWbXkvns9dTr62we+wOcmeH3yIK1YM7UH6eEIKMCkYn2TXf7lM+Z6PhzfP1R19I0hXfIhcfnpsNfiMdXSNdAPjHt7BWEFjVAofkUheZa3PGb3dg75jjtlj1EzdWp09jj63Zqoel9tlWGcUzQaS60YXRloAzpDbEC4SinO/2JoLPG46e2w0dTzyDT+ZHJFF8TdHj6+qp44FmZZ8dsXFph57BoLMoXn/8iD9c/jDFmJCucxRvy38CmjE2Jqs5gcGyV8UQyMzPHdGAvKCiQqs7z3JL5t0+IRUg+f0Kkx1L/7G3bto0DBw4c0DRt23TPlUpNIcScy829hLVrvjblMDIr+wLq639AR8dDiT2dnY/S2fnoFF8xxvETX8RqLVmQFZv2Xbso/uev0PZPt08ebBoMFH/1n8cNNJeU3Gq45Db99tQ34Jk7p3+N5PU6z0MKyInf1gA4IGo30RWuxhNeiSe8ko7wSrzR0jHnhjQ7LaHNtIQ2J7bZDV2JgLPQXEuh+TSWhmcx7VyHKS+TzLV5iWO1mEa0L6gHnJ5BjvzuNE4j5NnNaMGx1aMAWjBKqNlHqNmX+j7MhkTAObxup9ltw5hjnbew02o2sr4km/Ulqc2rhkJ62DkcdO451kCrP0bn0PhJZySmUdvhp7bDD6+ObDcbFcvyHSlB50q3k8pcG6ZFHnYaDUa+dunXCMfCPNb4GL0ZvTzgewB7lZ1PXf8pAL2qc1RTop6ennGvNzQ0RENDAw0NDYltSiny8/PHhJ1Op1OqOoUQQgghzjMSagoh5kVJyTuxWkupb7gHr3fvmP0u10VUV30iEUJu3HAXfv8nqKu/i87OR2bwijHqG+5ZkKEmgOvtb8dcWkrXvT9k8JVXxuy37dhB/sdvXfqB5mgG+WdpthhVBLelFrdlpAN9IGanM7wiEXR6wqsYirnGnDsQy6cumE9dcPjvX4yc3wZx1xzHHV+fM7fUjtFoQBkUphwrphwrrM7l0AM1AHz8W5cT7Q8RSerEHvboNy0QGXfMWjhG+Iyf8Bl/6g6TAXNB5kjQGQ89TbmZKOP8BFmZFiMbSrPZUKqHnU9ntgNw4cWXcqrDn7JeZ43Hzxnv0LjXCUc1Tnp8nPT4gLbEdovJwLJ4Zedw0LnK7aQi14YxzdWr02EymPjm5d8k8nSEp5qfAuDfX/13zEYzt26+FZfLhcvlYs2aNYlzQqEQHR0dKWHnRFWdmqbR2dlJZ2c
nr702sjZvZmbmmKZEBQUFmEzyPUUIIYQQYqmSn/SEEPMmN/cScnMvmXKDH4djNZs23oPPd4ya2q/h9b40rdfzevfi99cs2OZB9l27sO/aRbC2luZbbiV85gwAxd+8E9eb35zm0aVJhvPsx4xn5ydhw1tJaS6UMld4Cttncs6Utp/rtaZynaldy6pplNc8QvmBnycO88fy8YRX4Qnp1ZwdkeVENOuoaxvo7c+k98U2TryoB3FGs4GCcgfuqmwKq524q7LIys9MnKGUwpSdgSk7A+uqkfVANU0j5g/rU9g9A/p09niVZ2xw/LCTSIxw2wDhtgFSokKj0sPOUdWdprxM1DytZ2mzmNhU5mJTmStluz8YiYedPmrafdR06KFnW9/YpjqgNzQ60e7jRHtq9Wqm2cjGsmy2VLjYWpHD1oocCpwZc/V2ZoXZYObbV3ybzzz1GZ478xwA9x66F7PBzEc2fmTM8RaLhbKyMsrKyhLbNE3D6/WOaUo0WVVnfX099fX1iW3DVZ2jw06HwyFVnUIIIYQQS4CEmkKIeedwrJpW0Oh0rqOw4Npph5oAvb0vLthQc1jGypVYqqoSoaYpNzfNI0qjZVfM7Lyt74PCtbM7lqUqtxrioaZS4DR24TR2scKqNwuKaQZ6I2V60Bmv6OyJVKCRuj5nNByjva6f9rqR5kAZ9pEfK7wdg7gKbWNeXimF0WnB6LTAClfqNf2hxFqdiXU7PYPE/OHx30tUI9w+SLh9MDXsNChM+VbMbnsi8DS7bZjy5y/sdGSYuKDcxQXlrpTt/YEwtYmqTn29zhqPD0//+GtNDoWjvFzfw8v1I2FeWU5mPOB0saUih7XFWQuuKZHFaOF7V36PTz3xKfa07QHg+we+j9lg5oPrP3jW85VS5OTkkJOTk1LVGQwG6ejoGBN2hkKhMddIrup89dWRNQBsNtuYpkRS1SmEEEIIsfjIT29CiEUhEvGf/aBZPG++GWwj4U9sYDCNI0mzwrVQeQk0vjD1cyovlUBzOs7yNTaoGHnmJvLMTazjcQDCZbvp3P1zvdt6vd5x3dc9tuIwODBSafnf/zT2lxCf+NFVkw7N6LBgdFjIWOZK2R4dCBPpHNWgyDNItH9skAVATCPSMUSkY9QUcAWmvMxEg6JEdWdBJsp89qZKsyHLamZbZQ7bKnNStvcNhuMB53Andv1xp29s2NnSO0RL7xB/PNwKQIbJwMbSbLZWjgSd7qzR1bbzL8OYwfev+j6ffOKTvNz+MgDf3vdtzAYz71373pldMyOD8vJyysvLE9tisViiqjM57OztHdvwCmBwcHBMVafBYEis1ZkcdjqdM6weF0IIIYQQc05CTSHEomAyOWZ03sDAqVkeydxICTUHz+NQE+CKz8N/3gTaFDpqKwNc8fdzP6alZppfY/NVf0vJMhclSZWVg/0hfvr556f1sk/+4jhVm/IpX5uLOWPqIaLRbsZozyajKrV5TywQSQSc4aS1O6PeCTpsaxDpGiLSNUTgWPfIdgXGXOvIep3DlZ0FNgzTGOfZtHxBn4pddudlY/Zl28xsr8ple1VqpbanP8DBpl4ONHk52NTLkZY+gpHUP7dgJMa+xl72NY6EeKWuTLbEA86tFS7Wl2SnpZoz05TJD676Abc+fisHOg4A8I2Xv4HJYOKdq985K69hMBjIzc0lNzeXtWtHfsERDAZT1ugcDjvD4bGVv7FYjI6ODjo6OlKqOu12+5imRPn5+VLVKYQQQgixAMhPZEIsYN951xsB+Oyv/5TmkaRfTs7FMzrP0/EnSnresWAbBg0z2CXUTFi2G278Pjx42+ShmzLAjXfpx4vpmYWvsS3LklJ5qWkafZ1DiQpNk9lAJJx67eMvtnH8xTaMZgPla3Ko3lxA5cY87NkzWyPSYDWRUZFFRkVWyvZYMEKkYyhlvc5wxyDRnvHXs0SDaHeAaHeAwPHUNRuNrgw94HQnBZ7jTKufK+4sK9dtKOa6DcXA8Nqb/RxojAedzb0094xtSnTGO8QZ7xB/OqKvgWoxGdhQkqVPW6/MYUuFi+LszDHnzQWb2ca919zLzY/dzOHOwwB89aWvYjaYuWnlTXP2uhkZGVRUVFBRUZHYNlzVOboDu9frHfcaAwMD1NXVUVdXl9hmMBgoKCgYE3Y6HDP75ZsQQgghhJgZCTWFEIuCw7EKl+tCvN6Xp3mmtqC7oA8z2O2Jx7GBgTSOZIHY+gFwVcAz34LGcaoBKy/VKzQl0Jy5Wf4aK6VS1tD8m+9cRvOJXhoOd1L/ajdDSVPFo+EYDa920/CqXi3prs6ialM+1ZvyyS2xn3MTF0OGCUu5E0t56tThWChKpHMoJeiMdAwS6R4a0zMqMVZvUK/8PJk6lbnSaiBsB6/vtB54uu2YCzIx2MznNPazsZgMicZEH4p/W+vwBTjY5OVgk5cDTb0cafESGBUohyIxDjR5OdDkhef1adfF2dZEA6ItFTmsL8nCOkfT8O1mOz+85od89NGPcrT7KAC3v3g7JoOJG5ffOCevOZ7kqs5169YltgcCgXE7sE9U1Tm8P5ndbh/TlCg/Px+jcX6WNhBCCCGEON9IqCmEWDSqqz7JwUMfZML0YQILvQs6yPTzcS3brd86jkPdMxD06d3Rl10ha2jOluSv8Us/ggM/07eXbIW/fuicLm2yGKmOB5W7Yxqexn4aDndRf6SLntbU4N5Tr6/VufcPdWTlW6neVEDV5nyKV2RjNM7elGmDxYil1IGlNLWiTgvHCHemrtcZHg47JyhkNQcU5gD4X2hNfQ2nZWS9zqSu7Eb73IWdhU4rr19fxOvXFwEQjsY42e7jQFNvIuhs7B77faWtL0Dbq+08/Go7ABajgXUlWSOd1itzKMm2zlqncKfFyX3X3sdHHv0IJ3pOoKHx5Re+jNlo5rqq62blNWbKarWOW9XZ29s7pinRZFWdp0+f5vTp04ltw1Wdo8NOe9IvsoQQQgghxMxIqCmEWDRycy/BXXgDno7pT8df6F3QJdScROFaCTHnWuFauPS2kVCzv3XSw6dLGRRF1dkUVWez8y3L6escpOFIN/VHOmmt7UOLjfyior8rwOEnmzn8ZDMZNhMV6/Oo3pxPxfo8MjLn5scWZTZgKXFgKRkVdkZiRLqH9PU6k5sUdQ1BdPxfrsR8IYK+EMFT3pTthqRQc+hYNxlVWXNW1Wk2GthQms2G0mw+sEvf1uUPcigecB6Ir805GIqmnBeKxjjU7OVQs5efvtAAQKEzIx5w6utzbizNPqdqzuyMbO6/9n4+/JcPc8p7ipgW4wvPfgGzMnN15dUzvu5cMBgM5OXlkZeXN6aqc/Q6nR6Ph0gkMuYaE1V1OhyOMU2JpKpTCCGEEGJ6JNQUYhHwetpxuYvSPYwFwW5fOaPzFnoXdJUSasr0c5EGrkowZkA0CP52CPSBNfvs581AdoGNzVfb2Hx1OYGBMI2vddNwpIvGo92EAyNBW3AwQu0rHmpf8WAwKkpXuajaVEDVpjyy8uZ+PUhlMujTyt2pVXVaNMaLf34Wix9WFywbqe7sGoTIBGHnwMg05u
5fHBuzv+gLOzC55q5jeb4jg2vWublmnRuASDTGSY8vUcl5sMlLfdfY7z0dviCPHG3nkaN6NafJoFhfksWWipxERWdZTua0qjlzrDn8+HU/5sN/+TB1fXVEtSife/ZzfG/399hdvntW3u9cslqtVFZWUllZmdgWi8Xo6ekZE3b29fWNew2/34/f70+p6jQajYm1OpPDztmo6rzjjjtS7oUQQgghlgIJNYVYIIabAo3nJ5/+yJht52vzoJl2QZ/pefNFKjVF2hmMkLcCOvT1DumqhbLt075McvOgqbDazay+qIjVFxURjcRorfFSf6SL+sOd+HtHupjHohrNx3tpPt7Lc7+GvDKHPr19cz4FFc5ZmyI9FcpoIOyAsAOydo9MV9aiGpHeABHPIN3/OTa4nEz7na9gdGVgqcoioyqbjOosTAU2lGFu3pfJaGB9STbrS7J53049nOsZCHGouZcDjXoDokNNXgZGVXNGYhqHW/o43NLHz17Ut+U7Mtia1Gl9U5mLTMvkFYd5mXn8+HU/5q//8tc09jcSiUX4u6f/jruuuotLSy+dk/c8lwwGA/n5+eTn57N+/frE9qGhoTFBZ0dHx7hVndFolPb2dtrb2zl8+HBiu9PpHNOUKC8vT6o6hRBCCHHek1BTCLGozLQL+kzPmy8pjYIk1BTpkr8yKdSsmVGoeS6MJgPl63IpX5fLZe9aSVeLn4YjXdQf7qKzyZdybHeLn+4WP/sebsDuykg0GipbnYPRPHvrcE6HMirM+ZmY8zMpu/OyxHYtphH1Bmn/11cAMJc7CZ/xQyy1qjPqDTJ0qJOhQ50AGGwmLJV6yGmpzsJS4kCZ5u695dotXLXGzVVr9GrOaEyjJqWas5fTnWOrObv8QR495uHRY/oUa6NBsbbYGW9ApFdzVuTaxgTPBbYCPdh85K9p8bcQjoX5zFOf4e6r72Zn8c45e5/zKTMzk6qqKqqqqhLbYrEY3d3dKQ2J2tvb6e/vH/caPp8Pn8/HqVOnEtuMRiOFhYVjwk5b0i/IhBBCCCGWOgk1hVggRldeaprGd9890hH28r/6a3a86W3zPawFZyZd0F2uixb0epowqlJTup+LdMlP+px0nkzfONC7qReUOykod7Ljhmr8vQE94DzSRcvJXmJJ07wHvEGOPnuGo8+ewZxhpGJdLlWb86nckEemw5LGd6FTBoUpd2RqufsTFxALRQk1+Qg19BFs6CfU2I82qmN5bDBC4HgPgeM9+nXMBr2re1UWGdXZWCqyMGTMXbWeHk5msbY4i/depFekegdDHGz2xrut69WcvmBq1WE0pvHamX5eO9PPL/Y0ApBnt7AlUc2Zw6aybOwZJorsRfzk9T/hQ498iLaBNoLRIJ964lPce8297CjaMWfvLZ2GmwcVFBSwYcOGxPbBwcGUDuzt7e10dnZOWNXZ1tZGW1tbynan0zmmKVFubu6cvychhBBCiHSQUHMOKaWcgDP+1ByLTdBCVYhxjK5oOfDIg2y9/s0YTfKx1bugf4gJ2xKnMFBd9Yk5HtG5M9ikUlMsAAWrRx531aZvHONw5FjZcEUZG64oIxSI0Hysh/ojXTS82kVwYCT0CQejnD7YyemDnSgFRcuzqd5cQPWmfFzuhVPFZrAYsa5wYV3hAvR1OsOtAwQb+gjW9xNq7CM2kBpmaeEYwbo+gnV9+GgGA5hLHGRUxkPOqiyMcxziumwWrlxdyJWrCwGIxTROdfo50DjSab22Y+waxt0DIR4/3sHjxzv0969gTVGW3oCoPIevXHg3X957Kx2DHQSiAT7xxCe479r72FK4ZU7fz0Jis9nGVHVGo1F6enpSGhK1t7fj8/nGvcZwVWdt7cjn15T0c8PRo0eprq6Wik4hhBBCLAmSjsytzwK3Dz8Z3flSiOnwd3dR89LzrL10d7qHkna5uZewds3XOH7iS0webBpYu+br5OZeMl9Dm7HkSk1tQEJNkSb5SY24utJbqTkZi9XE8q2FLN9aSCwao72uj/rD+jT1vs6hxHGaBm2n+mg71ceL/3eKnCJbYpq6e1k2hjlar3ImlDFehVnuxHmZXq0f6Rwi2NBHqL6fYEMf0aQ1RgGIQbjFT7jFj/8FvWO9KT8zUcmZUZWFMdc6p+uNGgyKVW4nq9xO3n2hXs3ZNxTmcPNwp3Uvh5p66Q+kBrQxDY619XOsrZ//eqkJAFfWhzGX/JCw6mMoMsQtj93Kj1/372ws2Dhn41/ohpsHFRQUsHHjyNdhcHBwTPf1jo4OotHomGskV3r+z//8z5j9X/rSlzCbzXPzBoQQQggh5pCEmnPrO8D98cePuN3u8/encjEr9v3pd6y55Ip5bYixUJWUvBOrtZT6hnvweveOe0x11ScpKXnHPI9sZgx2aRQkFoC8pFCzpx4iITClf/r2ZAxGAyUrcyhZmcPFb1uB1zOYCDjb6/sgadnK3vZBetubOPhoE5lOM5Ub9YCzfG0u5jmcxj0TSinMhTbMhTa4sBiASF9Qn65e30+ooY+wZzDl/QFEuoaIdA0xuE//Raohy0JGvPmQpSoLc5F9zpoPDcvONHP5qgIuX1UA6NWcdV3+RAOiA41eajp8aKPG7u13YQj8DZmV92Mw+RmMDPBXf/obLnV8mSurt7ClIodl+fYFFUani81mo7q6murq6sS2aDRKd3c3995777Sudeedd1JeXs6yZctYtmwZxcXF0oRICCGEEIuChJpzSNM0H+ADUEqFDYb0NC4Qi5/JbCESDtFRf5qW469Rvk7ycdArNnNzL8Hvr6G390UiET/evlfo6XkegL6+A2ke4dRJ93OxIFhskF0BfU2gRaG3PnVK+gKnlCKnyE5OkZ2tr69ksD9E42t6wNl8vIdIaKSye8gX5sSLbZx4sQ2jyUDZ2hyqN+VTtSkfe3bGnIwvuXnQTJiyMzBtLsS2OT71ezBMsMlHqD6+LmeLD6KpSWGsP8TQkS6GjnQBoKxGMiqzsMQ7rFvKnHPafAj0as4VhU5WFDp5545yAHyBMIeb+xINiA42e/EOhomFChlq+giZFfdjMA2iGYZ4tv9rPPLgR4kFi8nONOtrc5bnsLXSxeZyF1lWqTKEkeZBd9xxR8r2gYEBvvWtbwFQWlpKa2srWlKiHI1GaWhooKGhgSeffJKMjAyqq6tZtmwZ1dXV5Ofnyy9ThRBCCLEgSagpxAI23DzosX+/myOPPwLo1ZoSaqZyOFYlGgEFAq288OIVQIye3ucZHGzAZqtK6/imYnSoqWma/CdSpEf+Sj3UBL1Z0CIKNUezZVlYe3EJay8uIRKK0nKiV1+H80gXg/2hxHHRSIzGV7tpfLUb/vskhVVZVG/Kp3pzPrkl9gX7WTTYzGSuySVzjd4IRgtHCTX79XU5h5sPBVOnI2uBKIGTvQRO9uobTApLmTPRYT2jMguDde5/PHRazVy6Mp9LV+br49I06roG4utyVrD3TCYe27+hjEMo0yCZFT9mqPFj9A25efpkJ0+f1DvEKwUrCx1sjTcg2lLhYnmBQ6o5k9jtI2s2f/SjH2VoaIjGxkbq6uqoq6ujq
6sr5fhgMMiJEyc4ceIEoDcfGq7irK6uJisra17HL4QQQggxEQk1hVgEtt3wlkSoWbf/ZXpaz5BbUprmUS1MVmsJ+Xm76ep+EoAzrb9i5YovpHlUZ6eMRpTVihYIgKahDQ2hpJGDSIeC1XD6Cf1xV016xzKLTBYjVfFKTC2m0dHoo/5IJ/WHu+hpHUg5tqOhn46Gfvb+sY6sfGtiHc7ilS6MxoU760KZjWQsyyZjWTYAWkwj3KY3Hwo19BOs7yPmD6eeFNEINfQTauiHpwEF5iJ76rqcWXNTuZoydqVYXuBgeYGDt28rAzbyStt6Pv7EzQSiAxhMA9irfoy/4WNooYLEeZoGNR4/NR4/v3qlGYAsq4kLKnLYUu5ia2UOF5S7yM6Uas5hmZmZrFmzhjVr1gDQ399PXV0d9fX11NXVjWlC5PP5OHz4MIcPHwYgPz8/EXJWVVVhtVrn/T0IIYQQQoCEmkIsCrklZSzbuoO6A68AcODh33PNRxZ+R+90KS19TyLUbGv7X5Yv+1sMhrn/T/m5MthsRAMBQK/WNEioKdIhpVnQ0gk1kymDwl2dhbs6i51vXk5f5xANR7qoP9JFa60XLTYyNbe/K8CRJ1s48mQLlkwTlRvyqN6UTzSkYbQs7GpAZVBYSh1YSh1wSSmaphHtDiR1WO8n0jWUepIG4bYBwm0DDOxpA8CYa9XX5Yx3WDflZ85L9eqO4s38+PX38bFHP8ZgZBCMPirW/YL3Vd5Jk8fGgaZeTrT7iMZSp9z3ByI8W9PJszWdiW0rCh1srXCxJV7RuaLQgVGqOQHIysriggsu4IILLkDTNLq6uhJVnA0NDQSDqQ2qurq66Orq4uWXX0YpRWlpaSLkLCsrS+m2LoQQQggxl+SnDiEWie1vvCkRah595kkufuf7sGVlp3lUC1Ne3hVkZBQTDLYRDvfS0fEXiorelO5hnZXBZiPa0wPIupoijfKTppsv0VBztOyCTDZfXc7mq8sJDIRpOtpN/ZEuml7rJhQYmb4dGopQ+4qH2lc8oMBeqHFEa6ZqYz5Z+ZlpfAdTo5TClJ+JKT8T+/YiAKK+0EiH9cZ+wq3+Mc2Hoj0BBnsCDB7oAMDgMOvrcsYrOc3FDpRxbgLCzQWbufeae7n18VsZigzRE+zkN2e+xM+u+xmljg0MhiIcadHX5jzQ6OVgUy/dA6Ex1znV4edUh5/f7GsBwJlhYnO5KxF0bqlw4bIt7KZY80Eplei2ftFFFxGNRmlra0uEnM3NzSkd1jVNo6WlhZaWFp599llMJhOVlZWJkNPtdiNrygshhBBirkioKcQiUbZuI4VVy+loOE0kFOTIY39m59vene5hLUhKGSkteTd19d8D4MyZBxZNqDlMQk2RNvmrRh531erzexfompJzwWo3s+rCIlZdWEQ0EqO11qt3Uz/Sib8nqWJNgwEPPPfrWp77dS15pQ6qN+vrcBaUO+e8w/hsMTot2DYWYNsY71QeiBBq8hEcbj7U7INILOWcmD/M0NFuho52A6AsRiyVzkSHdUu5E4Nl9rpnb3Nv4+6r7ubjT3ycYDRI+0A7f/OXv+Fn1/2MInsRO5flsXNZHqCHbM09Q4kGRAeavBxv6ycyqprTF4zw/Kkunj81sp7ksgJ7ogHR1oocVrmdS6aac3TzoKkyGo2UlZVRVlbG5ZdfTigUoqmpKTFdva2tLeX4SCTC6dOnOX36NKBPdR9ei3PZsmXk5uae61sRQgghhEiQUFOIRUIpxfY3voWH7/4OAAf/8ie2v+ltmMyyTth4SkreSX3DXWhaFG/fK/gHanHYV579xDQyJDVzkFBTpI09HzJzYKgXQn7ob4Xs83MNX6PJQPnaXMrX5nLZu1bSfcZP/WG90VBHY+q6g91n/HSf8bPv4Qbs2ZbE+p1la3IwmWcv4JtrBqsJ66ocrKtyANAiMUJn/ITiU9aDDf1ogUjKOVooSrDWS7DWq28wxqe9V+mVnBlVWRhs5/Zv1YXFF3LXlXfxqSc/RSgW4oz/DB/+y4f56et/itvuThynlKIiz0ZFno23bNH/3g6Forx6pi8l6Oz0Bce8Rl3nAHWdA/zfAb2a024xxqs59UrOLRU55NrP72pOi8XCihUrWLFiBaB3Vm9oaEhUcvb29qYcPzQ0xNGjRzl69CgALpcrpelQchMjIYQQQojpklBTiEVk1a7LePaBn+Hv6Wawz8uJ559mw5XXpntYC1JGRiH5+dfS2ak3WDpz5pesXvVPaR7V5FIqNQcGJjlSiDmklF6t2bxXf95Vc96GmsmUUuSXOckvc7Ljhmoee/gpfK1gHsql5WQPschIJeBAX4ijz7Vy9LlWTBlGKtblUr0pn8qNeWQ6FlcopkwGMir1rujOK/TmQ5GOwZFKzoY+on2jpntHNUJNPkJNPvzP6ptMblvqupyu6TeXubj0Yr535fe47anbiMQiNPua+cijH+Gn1/2U/Mz8Cc/LtBi5sDqXC6vjXeI1jZbeIQ42eznQ2MvBZi/HWvsIR1OrOQdCUV483c2Lp7sT26rybHrIWak3IlpT5MS0gJtHzTW73c769etZv349AL29vYmGQ3V1dQyO+gWd1+vlwIEDHDhwAAC3250IOSsrK7FYFtfnQwghhBDpJaGmEIuI0WRiy3U38twDPwNg/0O/Z/3ua+alYcNiVFb63kSo2d7+W1Ys/3uMxoW77l1qqCmVmiKN8lemhprLr0zveBYgs02RuwJ2795MKBCh+XgPDYe7aHi1m8DASIfxSDBK3cFO6g52ohQULc+melMB1ZvzcbkXXzMwZVCYi+yYi+w4dpXozYd6gyMd1hv6iHQMjTkv4hkk4hlkYG87AEZXBhlVI+tymgpsU5qyf3nZ5Xzniu/w2ac/S0SL0NDfwEf+8hH+47r/INc6tanNSinKc22U59p40+YSAALhKK+d6eNgk1dfn7OpF0//2GrOhu5BGroH+e3BMwBkmo1sKstma2VOoqIz37HwG9PNlZycHHJycti6dSuxWIyOjo5EwNnY2Eg4HE453uPx4PF42LNnDwaDgfLy8kQVZ2lpKUbj4qlyFkIIIcT8k1BTiEVm09XX8dL//YpwMEBXcyONRw5StXlruoe1IOXk7CIzs4KhoSYiER8ez0OUlLw93cOakKypKRaM87BZ0LmwWE0s31LI8i2FxGIa7af7qD/SRf3hTvqSAj5Ng7ZTfbSd6uPF357C5bZRvSmfqs35FC3LxrAI129USmHKtWLKtWLfqk8Djw6E9enqDfp09fAZP4xa0zLqDTJ4qJPBQ3qHcoPNhKVypJLTUuJAmcavgLyq4iq+efk3+fyznyeqRTndd5qPPvpRfvK6n+Cyumb0PqxmI9urctleNVLN2dYXGGlA1NzL0TP9hKKp64sOhaPsre9hb31PYltFri2l0/qaYifm87Ca02AwUFRURFFRERdffDGRSIQzZ84kQs6WlhY0beTvRSwWo7GxkcbGRp566iksFgtVVVWJSs6CggL5Ja4QQgghUkioKcQiY3U42HDVtRz884OAXq0poeb4lDJQWvIeTp3+JgBn
Wh9Y2KGmXUJNsUCkNAuSUHM6DAZFyUoXJStdXPK2FfS2DyTW4Wyr60vpLO71DHLwsSYOPtaE1WGmamMe1ZsKKFubg8W6eH9EM9rNZK7PJ3O9PiU8FooSauqPV3L2E2rsRwuPaj40GCFwvIfAcT0cVGYDlnJnopLTUpGFIWOkau91Va8jHAvzxee/SEyLUdNbw8ce+xg/fv2PybJknfN7UEpR4sqkxJXJGzfp1ZzBSJSjrf36lPUmvdN6a19gzLlNPYM09Qzy+0OtAFjNBjaVuthSObI+Z6Fz+tPvF7vhzuiVlZVceeWVBAIBGhsbE9PVOzo6Uo4PhULU1NRQU6N/D3I4HFRXVxMKhcjJyUnHWxBCCCHEArN4f2IW4jy29Q1v5tAjD6FpMRoOH6CrqYH8iqp0D2tBKi5+G6frvoemhejvP0y/7zWynBvSPaxxSaWmWDAKkkLNTgk1z0VOkZ2cIjtbX1/JkC9Ew6vdNBzpoulYN5HQSLAX8Ic5saedE3vaMZoMlK3JoWpTPtWb8rG7Fvd0ZoPFiHVFDtYV8eZD0Rjh1gGC8eZDocY+YgOjmg+FYwTr+gjW9eEDMIC5xKGv7xmv5rxh2Q1EYhH+3wv/Dw2N4z3HueWxW7j/2vtxWByz/j4yTEa2xqsvh7XHqzmHGxC9eqaP0Khu8YFwjJcbeni5YaSasywnM17JqQeda4uzsExQmbpUWa1WVq9ezerVemW4z+dLWY+zv78/5Xi/38+rr76aeF5bW5uo4qyqqiIzc+EuLyOEEEKIuSGhphCLkMtdxIoLd1K790UA9j/8B15/y21pHtXCZLHkUVh4HR7PHwG9YVDWmq+leVTjS+l+Lo2CRDq5KsFogWgI/O0Q6ANrdrpHtehlOi2svbiYtRcXEwlFaTnZS/0RvYpzMKnZTjQSo/G1bhpf6+aZB05SWOmkenM+VZsKyCu1L/opuMoYr8Isd+K8TJ/qHekc0tflrNfX5Yz2jlrPMgbhFj/hFj/+F/QKSFNBJpdXreMH+d/gX1q/Rbu5m1e7XuXWx2/lvmvvw2ae+zVLi7KtXL+xmOs3FgMQisQ41tafaEB0oLGXM96xa4y29A7R0jvEg4f195JhMrCxNJst8ZBza2UO7qzzq5rT6XSyadMmNm3ahKZp9PT0JALO+vp6AoHUqtju7m66u7t55ZVXUEpRXFycCDnLy8sxm81peidCCCGEmC8SagqxSG274aZEqHn8uae49N0fwO6S6VjjKS19byLU9HgeZOWKf8Rkmv0qnnMllZpiwTAYIW8FdBzTn3fVQtn29I5piTFZjFRtzKdqYz7aezQ6mnzUH+6k4UgX3WdSf6nR0eijo9HH3j/W48yzJtbhLFnpwjiFtRrvueVJAD7xo6vm5L2cK6UU5kIb5kIbXKiHg5G+oL4uZ73eYT3sGUyZug8Q6Rwi0jnEcrL4KV+ly+TlqO0UR3tO89XB/8f/e9NXsVnmtxmTxWTggnIXF5S7Ets6+gMciE9XP9DUy5GWPoKjqjmDkRj7GnvZ19gL1ANQkm1lS1IDovUlWWSYzo/GOUop8vLyyMvLY8eOHcRiMdra2qirq+PAgQN4vd6U9Tg1TaO1tZXW1laef/55TCYTFRUViaZDxcXFGAznVyWsEEIIcT6QUFOIRap09VqKV66mrfYk0UiEQ48+xCXvfF+6h7UgubK3Y7evZGCglmh0gHbPHykrfW+6hzWGSgk1pVJTpFn+qqRQs0ZCzTmkDAp3VRbuqix2vnk5/V1DiQrO1hovsaQmO77uAEeeauHIUy1YMk1Urs+lanM+levzyLAtnco0U3YGps2F2DYXAhAbDBNs8hGq1xsQhVp8EE1NOfMjLq7o384V/dvBA62H9uBcVkBmdQ4Z1VlYypwTNh+aS4VZVq7bUMR1G4oACEdjHG/rT+m03twztpqztS9A65E2HjrSBoDFaGBDaVaiAdHWShfF2efHlGuDwUBpaSmlpaVEo1Gi0SjV1dWJSs7W1taU4yORSGIf6FPdq6urE5Wcubm5i77iWQghhBASagqxqG274Sb+9G93AnDo0Ye58M1vx5xxfk1XmwqlFKUl76am9qsAnDnzAKUl71lw/6GRSk2xoCQ3C+o8mb5xnIey8jPZfFU5m68qJzgYpvFoNw2Hu2h8rZtQIJo4LjQUoXZfB7X7OvQGRatciXU4s/KXVthlsJnJXJNL5pp4d/JwlFCzX1+Xc7j5UDCaco41YiFc00e4pk/fYFJYypxkVGVjqc4iozILQxoaMpmNBjaVudhU5uKDF1cB0OkLJtblPBiv5hwKp76fUDTGgSYvB5q8/CRezVmUZWVrpYst5XrIub4kG6t56VdzGo3GREAJMDQ0RENDQyLI7O7uTjk+EAhw/Phxjh8/DkB2dnZKyOlwLLzZG0IIIYQ4Owk1hVjEVl64i6wCN/2dHgK+fo49+xSbr31Duoe1IBUV3cSp098iFgvg9x+nv/8Q2dlb0j2sFBJqigWlYPXI467a9I3jPJdhM7NqRxGrdhQRjcRoPeWl4XAX9Ye78PWMrDEYi2m0nOil5UQvz/+mlrxSO9WbC6jalJ/G0c8dZTaSsSybjGX6Wq9aTCPcpjcfOnHoEBmtGrnRUevARjRCDXoXdp4GFJiL7Fiq9OZDGVXZGLMs8/5eAAqcGbxufRGvW69Xc0aiMU60+1KCzobusf8utPcHePjVdh5+tR0As1GxriQ70YBoS4WLUlfmgvsl3mzLzMxk7dq1rF27FoC+vr6U9Tj9fn/K8X19fRw6dIhDhw4BUFhYmAg4KysrychY3M25hBBCiPOFhJpCLGIGo5Gtb3gTT//i3wHY/9Dv2XT161GybtQYZnM2bvcbaWv7XwAaGn5Ebu4uIhE/JpODnJyLcThWneUqcyu5UZA2IKGmSLP8lSOPu6QD+kJgNBkoX5NL+ZpcLn3nSrrPDNBwpJP6w110NPpSju0+M0D3mQH2PdyQ2DbgDS76TuoTUQaFpdSBpdTBjktKufvA3fxx/2/ZMLicDYMr2B7eSO6gM/UkDcJtA4TbBhjYo0/xNuZZUzqsm/LTEwiajAY2lGazoTSb9+/St3X7gxxs8nKwuZcDjV4Ot3gZDKVWc4ajGoebvRxu9vLTFxoAKHRmpDQg2li69Ks5s7Oz2bJlC1u2bEHTNDo7OxMhZ0NDA6FQKOX4jo4OOjo6eOmllxJT3YdDztLSUkwm+S+TEEIIsRDJv9BCLHIbr7qWF//nvwkNDdLbdoa6g/tYvu3CdA9rQSotfW8i1Ozqfpyu7sdT9rtcF1Jd9Ulycy9Jx/Aw2EZCzaisqSnSLW/FyOOeOoiEwJSeKjYxllKK/DIH+WUOtl9fzYA3SP2RLp55YOKlAn72hRfGbFuozYPO1Se2fIKwFuY/XvsPHnO9BMDbit/C59yfJNKoT1sPtw2MaT4U7Q4w2B1g8EAHAAaHmYzKLCzV2WRUZWEudqCM6al6zHNkcM06N9escwN6NWeNx8+Bpl497Gzqpa5r7L8dHb4gfznq4S9HPQCYDIp1JVlsrcjhilUF7Fqet6RDTqU
UhYWFFBYWsnPnTqLRKGfOnKG+vp66ujqam5uJxUYaN8ViMZqbm2lubuaZZ57BbDZTVVWVmK5eWFgoTYeEEEKIBUJCzTmklHICw2UB5uQfmISYLZZMG5uuuY59D/4WgP1/+p2EmhPw+05Mut/rfZmDhz7E2jVfp6TkHfM0qhHJ08+lUlOkncUO2RXQ1wRaFHrrU6ekiwXF7spgw+Wlk4aa5xOlFJ/Z+hnCsTD/eew/Afi/tt8Tyozy1Td+FZfBSCwQIdTkI1jfR7Chj1CzDyKpKWfMH2boaDdDR/U1GpXFiKUyvi5nVRYZFU5UmgJBk9HAupIs1pVk8b6dlQD0DoQ41OxNBJ2Hmr34g5GU8yIxjSMtfRxp6eNnLzZgsxi5YlUB165zc+XqQnLsS/uXF0ajkYqKCioqKrjiiisIhUI0NjYmKjk9Hk/K8eFwmNraWmpr9WU47HZ7ynqcLpcrDe9CCCGEECCh5lz7LHD78JPRPyQJMVu2XHcj+x/6PVosRvOxV/HUncK9bMXZTzyP9PS8wImTX57CkTGOn/hHrNaSea/YNNhlTU2xwOSv1ENN0JsFSai54I2uvIzFNH748afGPdZgVDz93yfYel0lWXlLq7EQ6MHm32//e8LRML86+SsAHqx7EJPBxB0X34HBasK6KgfrqhwAtEiM0Bk/wfo+Qg39BBv60QKpgaAWihKs9RKs9eobjPFp71V6JWdGVRaGNHahz7FbuHJNIVeu0bvGR2MatR0+vdN6o95p/XRnajXnYCjKn19r58+vtWM0KLZX5nDtOjevW1dERZ5tvJdZUiwWCytXrmTlSn3JDb/fn9J0yOv1phw/MDDAa6+9xmuvvQZATk5OIuCsrq7GZlv6XzMhhBBioZBQc259B7g//vgRt9u9MZ2DEUtXVn4Bq3ddxokXngH0tTWv/9Tn0jyqhaW+4W5gqtXSGidO3s7Fux4/+6GzSBoFiQWnYDWcfkJ/LOtqLkoGw8hU6Rs/tZlXHqqnva4fgFhU4+hzrRx/oY01u4rY9oaqJdc1XSnFP170j4RjYf6v9v8A+N2p32E2mPnyzi+nrJepTAZ9Pc3KLEBvPhTpGIxXcvYTaugj2pe6FiNRjVCTj1CTD/+z+iaT26YHnNXZWKqyMZ3DOqYtX3gOgLI7L5vR+UaDYk1RFmuKsnjPhRUA9A2GOdjcywununj8eAf1SVPWozGNvfU97K3v4V8eOs4qt4Nr17m5dl0Rm0qzU/4+LVUOh4MNGzawYcMGAHp6ehINh+rq6hgaGko5vre3l/3797N//34AiouLEyFneXk5FsvSrnwVQggh0klCzTmkaZoP8AEopcKy/o6YS9tueEsi1Dy55zkue++HcOYtza630+X31+D1vjytc4aG6jlz5teUlr5rjkY1liFzJEyIDQ6iadqS71grFriUZkHSAX2xq1ifR/m6XFqO9/LKQ/W0ne4D9GrOYy+0cWJPO6t3FrHtDZVkFyydajODMvBPu/6JSCzCH07/AYDf1PwGs9HMP+z4hwm/zyqDwlxkx1xkx7GrBE3TiPYG9anqDf0EG/qIdAyNOS/iGSTiGWRgr96R3OjKIKNqZF1OU4ENlcZwMNtmZvfqQnavLuSL16/ldKefR495eOyYh0PNXrSkGfg1Hj81Hj/3PHWaQmcGV69187p17iW/Dmey3NxccnNz2b59O7FYDI/Hk6jibGxsJBJJreZta2ujra2NF154AaPRSHl5eSLkLC4uxmg8P75uQgghxHyQUFOIJaJo+UrK1m6g5fhrxKJRDj7yIJf/1V+ne1gLQm/vizM673Tdd+Y11FQmE8pqRQsEQNPQhoZQMo1NpFP+qpHHXbJW41KglKJ8XS5la3M4c7KXVx5qoDU+lToW0zj+YhsnXmpn9YVutr2hCpd7aXwPMigDX7n4K4RjYR6ufxiA/z7+35iUic9u/+yUfoGklMKUa8WUa8W+VW/WEx0IE2rQKzmDDf2Ez/jGTAqIeoMMHupk8FCnPhabCUtSh3VLqQNlTM8vvpVSrCh0sqLQycd3r6DDF+DJ4x08dszD86e6CEZG3kyHL8gvX27ily83YbMYuXylvg7nVWuW/jqcwwwGA8XFxRQXF3PJJZcQiURoaWlJhJxnzpxBS0qFo9EoDQ0NNDQ08OSTT5KRkUF1dXViTc78/Hz55aUQQghxDiTUFGIJ2fbGm2g5rq/xdOTxR9j5tndjsS6tqYQzEYn4Z3ReONw9/9WaNhvRQADQqzUNEmqKdMpPWkOzqxY0DeQ/4EuCUoqyNbmUrcmNh5v1nKnxAvq06xMvtXNybzurLtQrN3OK7Okd8CwwGox87dKvEYlFeLTxUQB+fuznmI1mPr3l0zMKl4x2M5nr88lcr8+MiIWihJr6Cdb3E2rUb1o4NeWMDUYIHO8hcLwHAGU2YCl3Jio5LRVZGDLSU81X6LTy7gsrePeFFQyGIjxX28Vjxzw8eaKDnoGRqfeDoSiPHG3nkaOp63Beu85NZd7i/7syVSaTiaqqKqqqqrjqqqsIBAKJ9Tjr6+vp7OxMOT4YDHLixAlOnNAbFzqdzpT1OLOystLxNoQQQohFS0JNIZaQ5Vt3kFNcQm9bK8HBAV576jG2vuFN6R5W2plMjhmf29h03/yHmj36f3RlXU2RdvZ8sLog4IWQH/pbIbs03aMS0zS6edBopatzKF2dQ2utXrnZcqIX0DPsk3vbqXm5nRXb3Wy/vorc4sUdWJkMJu68/E4iT0d4svlJAH786o+xGCzcesGt53x9g8WIdUUO1hXx5kPRGOHWAYINffGgs4/YwKjmQ+EYwbo+gnV9+ppFBjCXOMioyj7n8ZwLm8XE69cX8fr1RURjGvsbe3n8uD5NXdbhHJ/VamXNmjWsWbMGgP7+/sRanHV1dfh8vpTjfT4fhw8f5vDhwwDk5+cnQs6qqiqsVuu8vwchhBBiMZFQU4glRBkMbL3+LTzxk3sBOPDwH7jg9TdgMJzf6zfl5Fw843OHhhrx+2twOFad/eBZIM2CxIKilN4sqHmv/ryrRkLNJaxkZQ5v/kwObae8vPJQPc3HR8LN2lc81O7zsHJbIduvrya3ZPGGm2aDmW9f8W0+8/RneLZF7+5z7+F7MRvNfGTjR2b1tZQxXoVZ7sR5GWiaRqRzSF+Xs15flzPaG0w9KQbhFj/hlpFZBsMNg5LNtHnQTBgNigurc7mwOpd/fMOaxDqcjx/zcFDW4ZxQVlYWmzdvZvPmzWiaRldXVyLkrK+vJxhM/bPv6uqiq6uLl19+GaUUpaWliSrO8vJyTCb5r5sQQgiRTP5lFGKJWX/FVbzw6/8k4PfR1+Hh1CsvseqiS9I9rLRyOFaRmVnB0FDTjM4fXpOzt/dFIhE/JpODnJyL5yTolFBTLDj5K5NCzVpYfmV6xyPmXPEKF2+6bQvtdX288lA9TUf16nE0qN3XQe3+DpZvKWTHDVXklc68Ej6dzEYz3939XT795Kd5sVX/Hv/9A9/HbDDzwfUfBOBU7yn2tu/FH/LjsDi4qOgiVuSsOKfXVUphLrRhLrTBhcUARP
qChBr66Pnl9NatjXgDmFzzX8kn63DOjFKKgoICCgoKuPDCC4lGo7S1tSWqOJubm4lGo4njNU2jpaWFlpYWnn32WUwmE5WVlYlKTrfbjTQhFUIIcb6TUFOIJcacYWXztdez93e/BmD/n35/3oeaAJUVt3Di5BdndG5d/feJ1H51zHaX60Kqqz5Jbu7sfX0N9pHqp9iAhJpiAUhZV1OaBZ1PipZlc+OnLqC9vo99DzXQ+Fq3vkOD0wc6OH2gg+VbCth+QzX5ZYsv3MwwZvD9K7/PJ5/4JHvb9eD+2/u+TYuvhVpvLfs9+8ecs829jVs238LO4p2zNg5TdgamzYXYNhcCEBsME2zyEarvw/dMy4Tntd/5CpZyJ5kb88nckI8pNz1TlWUdzpkxGo2UlZVRVlbG5ZdfTigUorm5ORFytrW1pRwfiUQ4ffo0p0+fBiAzMzPRcGjZsmXk5uam420IIYQQaSWhphBL0AWvv4F9D/4f0UiE1prjtNacoGTVmnQPK61KS9/F6bpvEw73TPvcSKR/3O1e78scPPQh1q75OiUl7zjXIQKjKjUHBiY5Uoh5ktIBvSZ94xBpU1SdzRs/uZmOxn5eeaiBhiNdiX2nD3Zy+mAn1Zvz2XFDNQUVzjSOdPqsJit3XXUXtz5+Kwc6DgDwq5O/mvD4/Z793PzYzdyx6w5uWnnTnIzJYDOTuSaXzDW5iVDTddMKhl7rInjam9JdPdTsI9Tso+/hesxlDmwbC8jcmL6AU9bhnDmLxcLy5ctZvnw5AIODg9TX1yemq/f0pP78MjQ0xLFjxzh27BgALpcrpemQ3T71oPiOO+5IuRdCCCEWCwk1hViCHDm5rLlkN0efeRyA/Q/9npJVX0jzqNJv+bLPzbhac2Ixjp/4IlZryaxUbMr0c7Hg5K8cedwpoeb5rLAyixs+vonOJh+vPFRP/eGRcLP+cBf1h7uo2pTPjhuqKKxcPF2cbWYb915zL+996L3U9dWd9fiYFuOOPXdQ7Cie1YrNyTguKsZxUTHRgTCBo90MvtZF8JQXYiOLWYZb/PS1+On7cz3mUgeZG/OxbczHlJc5L2McTdbhPDc2m43169ezfv16ALxeb6KKs76+noFRv/j0er0cOHCAAwf0cN7tdidCzsrKSiyW82u6vxBCiPODhJpCLFHbbnhzItSs3fsifR3tZBcWpXlU6VVa+i5qav+FWGy2w8IY9Q33zE6oaZdQUywwOVVgtEA0BP52CPSBNb1dmUV6FVQ4uf7WTXQ2+9j3cAN1BzsT+xqOdNFwpIvKjXnsuKEad9XiCDftZjtZlqmPNabFuO/wffMWag4z2s3YLyzCfmGRHnAe62botS4Ctd7UgPOMn/AZP/2PNGAusZMZr+A056cn4JxoHc7Hj3t4rlbW4ZwKl8vF1q1b2bp1K5qm0dHRkQg5GxoaCIfDKcd7PB48Hg979uzBYDBQXl6eqOIsLS3FaDw/w2IhhBBLi4SaQixRBZXVVG7aQuORg2hajB9/Su/o+tlf/ynNI0svm60Kv//YrF/X6907K13SpVJTLDgGI+StgI7456brFJRtS++YxIJQUO7kDTdvpPuMn1ceauD0wQ6I52qNr3bT+Go3Fevz2PHGKoqqF3YQfqr3FIc6D03rnH2efZzqPXXOzYNmymg3Y99RhH1HEbHBMEPHeuIBZy9EkwLO1gHCrQP0/6UBc7FdX4NzYz7mAtskV59bM1mH06Bge1UuyyxhtrrP70BOKYXb7cbtdrNr1y4ikQhnzpxJVHG2tLQQi40ExbFYjMbGRhobG3nqqaewWCxUVVUlKjmFEEKIxUpCTSGWsG03vIXGIwfTPYwFJTOzfE5CTdC7o59rqKlSQk1ZU1MsEPmrkkLNkxJqihR5pQ6u+9gGulv97Hu4gVP7R8LNpqPdNB3tpnxdLjtuqKZ4+cIMN4cbBc3kvLkMNcvuvGxKxxlsZuzb3di3u4kNRRgaruCsGRVwtg0Qbhug/9FGzEW2kQrOwvQFnKPX4TzQ1Mtjx8auwxnT4OX6Hl4GfnUSfnzyGa5d5+aatW42l7nOy3U4hw13Rq+srOTKK68kGAzS2NiYqOTs6OhIOT4UClFTU0NNTeqSIs3NzZSVlaHU+fu1FEIIsbhIqCnEEla1eSt5ZRV0tzSleygLRpH7Jjo7/zIn145E/Od8DaN0PxcLkTQLElOQV+Lg9R/ZwI7rB9j35wZO7fMk1k1sPtZD87EeytbksOOGakpWutI61tH8oZl9/57peXPJkGnCvs2NfZubWCDC0PEehl7tIlDTA5GkgLN9kHB7I/2PNWJy27ANV3C609eJ3GhQ7KjKZUfVyDqcjx3r4LFj7bIO5zRkZGSwatUqVq3Sv3f7fL5E06GDByf+ZfdPfvKTMdukeZAQQoiFTEJNIZYwpRTbbngLj953V2JbNBLBaDp/P/qFhddiNDqJRn2zfm2TyXHO15BKTbEgJYea0ixInEVuiZ3X/c16dtxQxb4/N1D78ki42XKil5YTvZSudrHjhmpKV+Wkd7BxDsvMvn/P9Lz5YrCasG8pxL6lkFggQuBED4OvdhE42QtJ61hGPIP0e5rof7wJU2EmmRsL9CZDblvaqvaS1+G8dfdyOn1Bnjju4VfPHeNod5RwUhd4WYdzck6nk02bNrFp06ZJQ83x1NTUsHz5clmDUwghxIJ0/iYbQpwn1l66m+d/9QsG+7wA1Ox9gbWXXJHeQaVZddXHOXX6m7N+3Zyci8/5GtH+/sTjwLFjBGtryVi5cpIzhJgHBVKpKaYvp8jOtX+9nh3XV7P/zw2cfNmDFm9mc+aklzMnD1Ky0sWOG6ooXZ2T1imvFxVdNK/npYPBasJ2QSG2CwqJBfWAcygecGpJCWGkYwjfE034nmjCVJAZX4OzAHNR+gJOgAJnBu++sIKiwTqCEQ2K1055Hc7XrXNz7To3lXnpq0JdKEZXXsZiMf75n/8ZALPZPKbh0AMPPIDT6eSCCy5gy5Yt5ObmztdQhRBCiLOSUFOIJeg773rjhPsevutbPHzXt1K2nW/NgyorP8bAQC1t7b+dtWu6XBed03qaA3v20HXPvQzu25fYFjx+grob34Rt+3byP/Fx7Lt2zcZQhZi+vKQ1A3vqIBICk1Q/ialxuW1c/aF1bLu+iv2PNHLypfZEuNla6+UP/3aI4hXZ7LihmrI16Qk3V+SsYJt7G/s9+6d8znb39rQ1CTpXhgwTts2F2DYXEgtGCZyMB5wnelIDzs4hfE8243uyGVN+5kiToWJ7WgPODJNi93TW4azv4V8eOs4qt0PW4RzFYDAkHn/uc5/j6NGjHDx4kObm5sR2n8/Hc889x3PPPUdVVRVbtmxh7dq1WCzy74AQQoj0klBTCHFeWrfuW9jtK6lvuHfcqegGQyaxWIBEt4tJGaiu+sSMx+L93/+l7Z9uh6ROpckG9+2j6W8+QvFX/xnX294249cRYsYsdsiugL4m0KLQWw8Fq9M9KrHIuAptXP2BtWx/QxUHHmngxJ52YvFws+1UH3/8/iGKlmWz44YqytflzntodsvmW7j5s
ZuJaeN/L05mUAZu3nzzPIxq7hkyjNg2FWDbVEAslBRwHh8VcHYN4XuqGd9TzZjyrCMVnCXpDTjHrsM5EA84ZR3O6crIyGDr1q1s3bqVzs5ODh48yOHDhxkYGAmKGxoaaGho4OGHH2bjxo1s2bKFkpISaS4khBAiLSTUFGIJGq/yMrl687qP/y3rr7h6Poe0IFVWfozKyo/R0fEY7Z7fEQn7MJmdFLlvorDwWlpbf8PxE18CJvsProG1a75Obu4lMxrDwJ49kwaaCbEYbf/vnzCXlEjFpkiP/JV6qAn6FHQJNcUMZRdkcuX717LtDVXs/0sjJ15sIxbv0N1e18eDPziMuzqLHTdUU7F+/sLNncU7uX3X7Xxlz1cmDTYNysAdu+5gZ/HOeRnXfDJYjNg2FmDbqAecwZpefQ3O491ooaSAszuA7+kWfE+3YMzVA07bxnzMpY60hlv6OpwOVhQ6EutwPnlCr+B8rraLYNI6orIO5+QKCgp43etex9VXX01NTQ0HDx6ktrYWLZ4SB4NB9u3bx759+3C73WzZsoVNmzZhS1obXAghhJhrEmoKcR6q2/+yhJpJCguvpbDw2jHbS0reidVaSn3DPXi9e8fsd7kuorrqEzMONAG67rn37IHmsFiMrnt/OCuhZrC2loE9LxEb8GOwO7Dv2ilrd4rJ5a+C00/ojztPwtob0zsesehl5Wdy5V+tiVduNnLsxVZi8e7cnvp+/nT3YQorney4oZrKjXnzEpa9deVbKXGUcN/h+9jn2Tdm/3b3dm7efPOSDDRHM1iMZG7IJ3NDPlo4SiARcPagBaOJ46I9AfzPtOB/pgVjTkY84CzAXJbegBP0dTjftaOCd+2oYDAU4bnaLh4/5uEJWYdzyoxGI2vXrmXt2rX09/dz+PBhDh48SE9PT+IYj8fDI488wmOPPcaaNWvYsmULy5YtS5naLv4/e/cdH1WdNX78c6dk0nsBUgmEEgiQUCMgoCAIq2tBXde2KmvXXddd131+z+66+2zfdYuuHbuuuvaGoCIgvSW0ECAQUkjvPZNk5v7+mDAzQWrI5E4579crL+beO3fmJCFTzpzvOUIIIVxBkppC+KCiPTlYerrRG4xah+L2IiNnEhk5k4rKD9m//yEATKZ4Jk1cfl49NMGWWHTuoXk22rdvP6/hQSfr3Xmc9O4Up+U8LCjvQ/ALhtQ5EDtWs5CEdwiJ9GfO90eTtSiZ3FXF5G10JDeri1v47Kk9xCSFMHVJCikTol2eKJsxdAYzhs7gcMNhnt3zLCuLVgKQPTSb5y55zqX37a4Uo56AcdEEjItG7bbSWdBAx95aOvbX9U1wNphp/aaM1m/K0IebbEnRCdH4JYZonuAM9DOwsB99ONNibX04F6R7bx/OE4cHnUpoaCizZ89m1qxZFBcXk5ubS15eHj09PQBYLBby8vLIy8sjLCzMPlwoPDzcdcELIYTwaZLUFMIHdXV0cCw/j+SMSVqH4jGCgxwJHaMx5LwTmgBtm7f067zG998n+p57zv086d0p+qtwLWx/wbFdtRdW/tx2OXkmzHkYUudqEZnwIiGR/lx4/WiyFqWQ+0UxeevLsfQuF64paWHF03uJTgxm6uLhDJ8YjeLi5NLIiJH8YPwP7EnNkpYSl96fp1CMOgLSowhIj0LtOSHB2emU4Gw007qhjNYNZejDTASMjyJgQowtwalxYtC5D+f/LB7L4erWU/bhLKhupaC6lafWOvpwLkiP5YIR0T7bh1NRFFJSUkhJSeHSSy9l37595OTkUF5ebr9OU1MT69atY926daSmppKVlcWYMWMwGOTtpxBCiIEjzypC+KjCnO2S1DwHen2A/bLF0jEgt2lta+3XefUvvUz9Sy8THRKCJTaW8i++xC8lBb/k5N5/k9D5+/c5R3p3in7LeRU++RGcqsdg8UZ47Uq47HHIumlwYxNeKTjCxOzrRvVWbpawb30Zlt6BNbWlrXz+7F6i4oOZuiSF1EkxLk2QpYWnYdAZ6LH2UNZaRpO5iTBTmMvuz9MoBh0BY6MIGNub4DzcaEtw5tWhdvbYr2dpMtO6sZzWjeXoQ/0cFZxJoZonOAHpw3ke/P39mTJlClOmTKGqqoqcnBz27NlDR4fjtVJhYSGFhYUEBAQwYcIEMjMzGTJkiIZRCyGE8BaS1BTCRzz09qcU7c7hvT/8CrD11Zx78zLNl4N5Cp3OkSS0Ws0Dc5tBwed1vr6lBX1LC01HjnzrmGHoUPxSku3Jzsb33tekd6fwcIVrT5/QPE61wicPQHiiVGyKARMUZmLWtWlkLkxi15cl7FtXRk9vcrOurJWVz+0jclgQUxanMDIr1iXJMT+9H2nhaeTX5wOQX5/vE/00+0Mx6AgYE0nAmEjUK62YjzTaenDur8Pa7pTgbO6idVM5rZvK0YX4ETA+isCMGPxS3CPBKX04+y8uLo5LL72UBQsWcPDgQXJycjji9Bqlo6ODrVu3snXrVoYOHUpWVhbjx48nICDgNLcqhBBCnJokNYXwIQnpGRhN/nSbO2msqqChoozIYQlah+UR9HpHUnOgKjWDsvv3xtiYlEhPZRVqV9cpr9NTUUFPRQXt/Vzifr69O4WXWPeXMyc0j1OtsO6vktQUAy4ozMTMpWlkXpLMri9L2LvuGD29k7jry9v4Ynke24cWMXVxCiMmxw54z8P0qHRHUrNOkppnQzHo8B8dif/oSFSLFfORpt4Kzto+CU5rSxdtmyto21yBLsRo69uZEY1peJhbJDilD2f/GAwGxo0bx7hx42hsbGTXrl3s2rWLxsZG+3UqKir47LPPWLVqFenp6WRmZpKSkiIftgshhDgnktQUwocYjEaSJ2RyePtmwFatKUnNs6PTOaoIrNaBSWqa0tIInDLlnIYFBU6dSvJrr6JaLKz/4EMM1dWMCQulq6iYruIiuoqK6T527OyrMk+jbfMWSWr6sup829Lyc1G8wXaeDA8SLhAY6scFV48k85Ikdn1Vyt61x+juHVLTUNHGFy/kEfHZUSZfmkLa1LgBSySlR6XzXsF7AOyv2z8gt+lLFL0O/1ER+I+KIPyKEZgLnRKcbc4Jzm7atlTQtqUCXbCRgHFRBGTE2BKceu0TXafqw/lVfhU5JQ3Sh/MUwsPDmTt3LhdeeCFHjx4lNzeX/Px8LBbb325PTw979uxhz549REREkJmZyaRJkwgNDdU4ciGEEJ5AkppC+JjUyVMdSc2c7Uy57CqNI/IMOp3Jftlq7UJVLSjK+b8xib73HkpuX3Z2SUidjuh77gZA0euxRkfRFR1F5Ny5fa6mdnXRdayMrqIiuoqKaF6xgs59+845tv72/BReonBd/87b+TIs+C0YTGe86mlV59tiMLeAKUQmrQu7gBA/sq8cQeaCJHZ9VcKetcfo7h1Q01DZzlcv7WfHiiKmXJpsS27qded1f2MjHf/vjldsiv5R9Dr80yLwT4sg/LsjMR9tomNvDR15dVhbu+3Xs7Z207a1kratleiCjic4ozGlhmsX/AmkD+e50el0jBgxghEjRtDe3s7evXvJzc2lsrLS
fp2Ghga+/vpr1qxZw8iRI8nMzGTUqFEyXEgIIcQpyTOEED4mNXOq/fKxA3l0trXif569HX2BoijodAH2Kk2LpROD4fx7ZgVlZzP0t7858xAfnY6h//fbs+pzqfj5YUodjil1uG3bYOhXUvN8e34KD2du6d95W5+xJTbjp0DyBbavxGngd5Z/L4VrbcveT1YlKpPWhRP/YCMzrhjBpAVJ7F5dyp6vS+nqTW42VrXz1cv5bP+siCmLUxg1rf/JzVGRo9AreiyqheLmYlq6WgjxCxnIb8UnKXoF/5Hh+I8Md0pw1tKxr7ZvgrOtm7ZtlbRtq0QXaCAmUqF1iIpqsaKcZ8J6oJzYh3NDQS1fSh/OUwoMDGT69OlMnz6d8vJycnNz2bNnD2azrWe5qqoUFBRQUFBAYGAgEydOJCsri5iYGI0jF0II4W4kqSmEjwkKj2DIiDQqjxSgWq0U7c5hzAUXah2WR9DrHUlNq7UTGJg3IOFLl2KMj6f2qadp3779W8cDp04l+p67+z24p7+9O/t7nvASpvNI2vR02paiF2+wbesMMHRSb5JzJiTNgIDwb58nk9ZFP/gHGZl+eSoTL05kz9el7P76GF0dtmXNTTUdrH4ln+29y9JHzxiC/hwTYSa9iRHhIzjUcAiAA/UHmDpk6hnOEudC0Sn4jwjHf0Q44ZePoKuomfa9NXTsq8Pa4kgKWtt7CGvXEXYMKvZvxT89isCMaEwjw90mwRnoZ+CScUO4xKkP51e9fTgLpQ/ntwwbNoxhw4ZxySWXkJ+fT05ODkVFRfbj7e3tbN68mc2bN5OQkEBmZibjx4/HZDrP1QBCCCG8giQ1hfBBqVnTqDxSANiWoEtS8+w4L0G3WDoH9LaDsrMJys7GXFBA2+YtWNta0QUFE5Q947z7Wva3d6f00/RxqXP6d15oAjQf67vP2gNlO2xfmx4HFIgb76jkTL4Aqvef/aT1j++Hil0wdZksSRd2/kFGpl2WysT5Sbbk5upSzL1DaZprO1nz2gF2fl7E5EW9yU3D2SfB0qPS7UnN/XX7JanpQopOwZQahik1jPDLRtBV0kzHnlra99Vibe6b4GzfUUX7jiqUAAMB6bYl6v4jw1HO4XfrSs59OH9xDn04Y0JMzB8by4L0OJ/pw2k0GpkwYQITJkygvr6eXbt2kZubS0uLY9XAsWPHOHbsGCtXrmTcuHFkZWWRmJgow4WEEMKHSVLThRRFCQGOl7oYrQMwuEOIgZCaNZVN77wBwNHcHVitFnQ673/BfL70+oEfFnQiU1qaS5KJ/e3dKXxY7FhbVeW5DAtKngW3fgbNFVCyCYp7v6pPHK6iQtVe29e2Z227DAFnP2kdFbYvt31FDIfZP4Gsm88+TuHVTAEGpi4ZzsSLEtmz5hi7VpdgbnNKbr5+gB0rishalMzY7KHojWdOgKVHpfPh4Q8BGRY0mBSdgiklDFNKGGHfSaWrpJmCz3cRVKVg7HQkstSOHtp3VtG+swrF30BAeqQtwZkW4TYJTjj7Ppw1LWbe3FbKm9tK7X045/f24Yz0gT6ckZGRXHTRRcydO5cjR46Qk5PDwYMHOf5eqru72z5RPSoqiqysLCZOnEhwsLTNEUIIXyNJTdd6CPj18Y2qqioNQxHCIXb4CIIjImltqKeztYWKQweJH5OudVhuT+80Ad1icU1S01Vc0btT+IA5D9uWe59NslHRwZyf2S6HDoXxV9u+ANrroWRzb5JzI1Ts/vZt9vTzb6rhqK1yc+O/YMlj0m9T2PkFGJiyOIUJFyWwd+0xdn1ZSmebrVdjS30n6/5zkJ2fF5G1MJn0mcNOm9yUYUHaO57grB2rUjtGJTs1y96D09Jotl9P7eyhPaea9pxqFH89AWOjHAnOs0hgD5bz7cM5f2wcKdHe3YdTp9ORlpZGWloabW1t7Nmzh5ycHGpqauzXqaur48svv2T16tWMGjWKzMxMRo4ciV4vH9YLIYQvkKSmaz0GPNd7eWVcXFyGlsEIcZyiKAzPmsre1asAKMzZJknNs6DT+9svW6wDu/x8MJypd6cxKZGhv/mNJDSFQ+pcuOxfZ14WruhsfS5PlVAMjIQxS2xfAJ3NcGybo5KzdBuolvOLte4wvHoFXP6E9NsUffj5G5i8KIWMuQnsW1fGrq9K6GixJTdbG8x889Yhdq4stiU3Zw3FcJKlvqMjR3PX5n8B8Gz2j2nrbiPI6N0JJbemgCk5FFNyKGFLhtNV2kLHvlo69pyY4LTQnltNe241iklPwNjeCs5RkW6V4JQ+nGcWFBREdnY2M2bMoKysjJycHPbt20dXly0BbLVaOXDgAAcOHCA4OJhJkyaRmZlJVFSUxpELIYRwJUlqupCqqi1AC4CiKN06nfu8eBIiNWuaU1JzO7O//wNtA/IAzpWa1gHuqTlYTuzd2bppE21r1wLgP2asJDTFt2XdDOFJsO6vjsE/zpJn2So0z6VC0j8URs63fQGs+QOs+/MABKvCJw9AeKJUbIpv8fM3kLUw2Zbc/KaM3C+K7cnNtkYz698+xM6VRWRdksy42cMw+DmSmwEGx+O/isrB+oNkxWUN+vcgvk1RFExJoZiSQgm7dDjdx1pp31dLx95aLPWO52rVbKF9Vw3tu2pQ/PT4j40kMCMa/9ERKG7Us/JkfTi/yrclOKUPp+33nZCQQEJCAosWLSIvL4/c3FxKSkrs12ltbWXDhg1s2LCB5ORkMjMzSU9Px8/P+5fuCyGEr5GkphA+Knn8RPRGI5bubmpLi2mqriIsNk7rsNyac6Wmq3pqDpbjvTsDp0/jaG9Ssz03B1VVpeG++LbUubav6nwoXAfmFtt09NQ5AzOoJyDy/G/jONVqS8BKUlOcgtGkJ3NBEuPnxJP3TRm5X5TQ3juApr2piw3vFJCzqpjMS5IYd2E8Rr9vJ4f21+2XpKYbUhQFv8QQ/BJDCFuUQndZKx37amnfW4ulzinB2WWhY3cNHbtrUPx0+I/preAcHYnuJL9vLR3vw3nXHOc+nNWsL6jx+T6cfn5+ZGZmkpmZSW1tLbm5uezevZvW1lb7dYqLiykuLmbFihVkZGSQmZlJfHy8vNYRQggvIUlNIXyU0d+fpHETOLprJwCFudvJXPgdjaNyb3qd0/JzD63UPJFp5Eh0ISFYW1qw1NTSXVqKX1KS1mEJdxU71jXTxvs7af1UijfYErAyGV2chtFPz6T5SYy/MJ68DeXkriqmrak3udncxcZ3D9uSmwuSGT8nvs+50lfT/SmKgl9CCH4JIYQuTKG7vM22RH1vLT21jg8m1S4rHXtsS9cVo1OCc4z7JTid+3B2dFlYX1DDl/ur+PpANXU+3oczOjqaBQsWcNFFF1FQUEBubi6HDh1C7S1t7erqYufOnezcuZPY2FgyMzOZMGECQUHe+fMQQghfIUlNIXxYatY0R1IzR5KaZ6Jzmn5u8fBKzeMUvZ6AzEm0fbMegPadOZLUFIOvP5PWz6RwnSQ1xVkx+OmZeFEi42YPI39jBTtXFtPW25exo6WbTe8fJvfL4j7nyAR0z6IoCn7xwfjFBxN
6STLdFU4JzhqnBGe31TZ8aG9vgnN0BAEZMbYEp8m9EpwBfvo+fThzSxr4UvpwotfrGTNmDGPGjKGlpYXdu3eTm5tLXV2d/TrV1dWsWrWKL7/8kjFjxpCZmcmIESOQVmFCCOF5JKkphA9LzZrK6hefBqB03266Ojvw8w84w1m+S++8/NzDpp+fTmDWZHtSsyNnJ+FXXqFtQMI3ncuk9bNhbhmY2xE+w2DUkzE3gfSZw3jm/rV9jh3vvQnYBwY9+fnX9n33PnPRoMQozp+iKPgNC8ZvWDChC5LpqWqnfW8tHXtr6Kk+IcG5r46OfXVgsCU4AzOi8R8bic7kXm+h9DqFKSmRTJE+nH2EhIQwa9YsZs6cSUlJCbm5ueTl5dHdbft7tlqt7N+/n/379xMaGmofLhQREaFx5EIIIc6Wez0jCyEGVWhMLNFJKdSWFGHp6aFk725GTp2hdVhuy3lQkMVqPs01PUtAVqb9cntOroaRCJ92tpPWz5Yp5PxvQ/gkvRtNxRaupSgKxiFBhA0JImxBMt1VbbTvqaVjXy09Ve2OK/ZY6cyrozOvDgwK/qMiHQlOf/d7O9WfPpwBRj0XjopmQfoQr+vDqSgKycnJJCcn24cL5eTkUFZWZr9Oc3Mz33zzDd988w3Dhw8nKyuLMWPGYDQaNYxcCCHEmbjfs7AQYlClZk2ltqQIgMKcbZLUPA2dzjsrNQMyMsBohO5uuo4coaehAYNUKQgtnGnS+rkY6D6dwqc4V15aeqwc3FLJmtcP9LmOcVQHt967CKObLUsW/WeMCyJsQW+Cs7qdjj01dOyrpbvSOcGp0rm/js79daBX8B8VQUBGNAHpUW6Z4DzbPpwd3RZW5VWxKq/K3odzwVjbMnVv6sPp7+/P5MmTmTx5MtXV1eTk5LBnzx7a2x2/46NHj3L06FH8/f3JyMggKyuLoUOHahi1EEKIU3G/Z14hxKBKzZrGtg/fAaAwdweq1YoiPYVOynn5ubf01ATQBQQQkJ5Ox+7dAHTk5hJykSylFBpxnrS+/QXYvhxQz3DSCZJnST9NMWD0Bh3ps4Z9K6nZfSiA9/66k8V3ZRAaLa1bvI0xNhDj/GRC5yfTXdNuGya0t5buSke/Siwqnfn1dObX06BX8E9zSnAGuN/brP704fz9inxGx4XwvWmJXD05gVB/76lcjI2NZdGiRcyfP59Dhw6Rk5PDkSNH7MOFOjs72b59O9u3b2fIkCFkZWWRkZFBQID8vQshhLtwv2dbIcSgGpo2Cv+QUDpbmmlrqKe6qJC41JFah+WW+gwK8pLp58cFTJ5sT2q279wpSU2hvdixsORvMHQCfPwAZ53YVHQw52cuDU34try4DYyrmgVA3bFW/vvH7Sz84XgSx0RqHJlwFWNMIMaLkwi9OMmW4Nxnm5beXXFCgvNAPZ0HehOcI8MdCc5A90sEntiH80hNqz3BeWIfzoNVLfzmk/38ddVBrsyM5+bsFEYP8Z4WHwaDgfT0dNLT02lqamLXrl3k5ubS2Nhov05lZSUrVqzgiy++YOzYsWRmZpKSkiLDhYQQQmOS1BTCx+l0elInTWb/+jUAHNm5TZKap+DcU9Oblp8DBE7Oov7FFwHokL6awp0cX5L+6UNQf/j011V0cNnjtkpPIVxkfeo71AYdY9bRpehVA+a2Hj751y4uuHokEy9ORFG8a5q06MsYE4hxXhKh85Loqe2gvXeKendZq+NKFpXOgw10HmygQXcY08hwWw/O9Cj0QWdOcB57xDa8L+FPs131bXzLiJhgRsxx9OFcc6CaL/ZXseFwDZ3dtj6c7V0W3thawhtbS5g+PJJbLkhhQXocRr33JPbCwsKYM2cOs2fPpqioiNzcXPLz8+np6QGgp6eHvXv3snfvXsLDw8nMzGTSpEmEhYVpHLkQQvgmSWoKIUidPM2e1CzM2c4F13xf44jck85Ll58DBGQ6hgV17NuHtbMTnb//ac4QYhClzoUHdkLOq7D+H9BQ+O3rJM+yVWhKQlO4WHJoMvlspj6wguuKHqKr1YqqwsZ3D1NT0sK8G8dg8JM+m77AEB1A6NxEQucm0lPXQce+Wtr31tJ9zCnBaVUxH2rAfKgBPijANKK3gnNc9FklOLUQE2Li2qmJXDs1kTZzDx/uKuPVTcUcrGqxX2fr0Xq2Hq0nLtTEDdOT+d60RGJDvOd1g06nIzU1ldTUVDo6Oti7dy+5ublUVFTYr9PY2MiaNWtYu3YtI0aMIDMzk9GjR2MwyFtsIYQYLPKIK4QgeUImOr0eq8VCVWEBrQ31BEfIMroT9anU9KLp5wCGyEj8hg+n6+hR6O6mc98+AqdM0TosIfrKutn2VZ0Pe/4LG/5u2x8UC7d+pm1swusdHx5UtO5zipuLqQopIvj6WoxfpVB1tBmAQ9uqaKhs59K7MgiJ9J4EjzgzQ1QAIXMSCZmTSE99pyPBWepIBGIFc0Ej5oJGGj88jCn1eIIzCn2we04bDzIZuGF6Mt+flsS2o/W8urmYlXmVWKy29elVzWb+/uUhnvi6gEvHD+WWC5LJSorwqorlgIAApk2bxrRp06ioqCA3N5c9e/bQ2WlrRaSqKocPH+bw4cMEBgYyYcIEsrKyiI2N1ThyIYTwft6zVkAI0W/+QcHEj063bx/N3aFhNO6rT6Wmly0/BwiYnGW/3L4zR8NIhDiD2LEw7/+B3mTbbquG9nptYxI+Iz3K8Xx50LyPK3+SxdiZjsnINSUt/PcP2yk72KBFeMINGCL9Cbkwgbh7JzHk51MJWzIcv6QTelBawXy4kcYPDlPx+63UPLeH1i3lWFq6Tn6jGlMUhempUTx5QxYbf34RD1ycRnSwyX6826Ly8e5yrn56M0se38Db20vo6LJoGLFrDB06lMWLF/PQQw9x9dVXM3z48D7H29vb2bJlC0899RTPP/88O3bssCc/hRBCDDxJagohAEjNmmq/XJizTcNI3Jde50hqeltPTYDArMn2y+05OzWMRIizoDdA7BjHdtU+7WIRPsU5qbm/bj96o455N45hzvWj0Ols1Wmdrd189K9d7FlTap+kLHyTIcKfkNkJxN4ziSGPTCPsO6n4JYf2vZIK5sImGj88QsUfttp3W809gxzt2RkS5s9PFoxi0yMX8fj1mUxJjuhzfH9FMz9/by8z/ria33+2n+K6tlPckucyGo1kZGRwyy238KMf/Yg5c+YQGtr391pWVsann37KY489xgcffEBxcbE8HgghxACT5edCCMDWV3Pd67ZBMcV7dtHT1YXBzz2XQmlF7zz93Op9n7oHZjn11czdhWq1oshUT+HO4jKgYrftcuU+GH6htvEInzAm0pFML2gsoMvShZ/ej/FzEogcFszK5/bS0dKNalVZ/3YBNcUtzLlhNAaj9Nn0dYZwEyGz4gmZFY+lyUz7vlqaPjmhR7BTzqv815u/dRuDOTzoTPwMOi6fOIzLJw4jr7yJ1zYX8+GuMvtgoaaObp5ff5TlG44yd1QMN1+Qwpy0GHvy31tEREQwb9485syZQ2FhITk5ORw4cACr1fZz6O7uZvfu3e
zevZuoqCgyMzOZOHEiISHeM0FeCCG0Iu9WhRAARAyNJ3yIbflct7mTY/v3ahyR+9E59dT0xuXnxuRk9FFRAFibmzEfPsOkaSG0NmS843JVnnZxCJ8SZgojITgBgB5rDwWNBfZjw9LCufZ/phKb7EhWHNhSyQd/y6G1wfs+DBP9pw8zETIzXuswBsy4YWH86eoJbP3FfP53yViSIgPtx1QV1hys4daXtjPvsbUsX19IU3u3htG6hk6nY+TIkVx77bU89NBDLFy48Ft9Nevq6vjqq6/4+9//zptvvsmBAwewWLxvmb4QQgwWSWoKIQBbr6TUrGn27SM52zWMxj3pnXpqWr2wUlNRFAKzHH01O3Kkr6Zwc3HOSU35IEYMnrFRY+2X8+vy+xwLjvDnyp9mMWbGEPu+6mJbn83yw42DFaLwEAl/mt3na+j/m37qK+ug7q0DdFW473LusEAjy2ansvanc3np1qnMGx2D88yg4rp2fvdZPtP/+BWPvLeHvPIm7YJ1oaCgILKzs7n77rv54Q9/yOTJkzGZHD1IVVXl4MGDvPXWW/zjH//gyy+/pLa2VsOIhRDCM0lSUwhh17ev5nbp+3MCnc55UJD3JTVBhgUJD+NcqVl9ACzu2X9OeJ8T+2qeyGDUc9EtY5l9XRpK71LbjpZuPvp7LvvWHZPnV3FK+hBH65/I74/BGB/sOGiFjl01VP8rh9qX9mEubHLb/0s6ncK80bG8dOs01v50Lj+cPZxQf0fns85uK29tL2XJ4xtY+vQmPt5dTlePVcOIXUNRFOLj47nssst46KGHuOKKK0hOTu5zndbWVjZu3Mi///1vXnzxRXJzc+nqcs+BUUII4W6kp6YQwi5h7Dj8AgLo6uiguaaK+rJSohKStA7LbTj31LRavW/5OUDgZMewIKnUFG4vIAJCE6D5GFjMUFdgm4wuhIulRzqSmidWah6nKAoT5iUSNSyYlc/vo7O1G6tVZd2bh6gpaeHC741Gb5T6AnFqgRNiCMiIxny4kZZ1xzA7Vfp2Hmyg82ADfkkhhMxJxH9spD2B7m6So4L4f0vS+cmC0Xy8u4xXNhWzv6LZfnxHcQM7ihuIDjbx/elJfH9aEkPC/E9zi57Jz8+PSZMmMWnSJOrq6sjNzWXXrl20trbar1NSUkJJSQmff/4548ePJzMzk4SEBBTl5L/bRx99tM+/Qgjha+SVlBDCTm8wkjLBUal3ZKdMQXem0zmWDVmtZlTV+yoK/MeMQQmwJW+7y8rorqzUOCIhziBunONypUxAF4PDefn5oYZDdFtP3R8wfnQE1/xiCtGJjoq7/Rsr+ODvObQ1ml0ap/B8iqLgnxZBzLIMYu+bREBGNDjlt7pKWqh7bT9V/9xJ244qVDeudgzw03Pd1CQ+e2AW796VzeUTh2FwSsTWtpp5fHUBM//8Nfe+kcPWwjq3rUQ9X1FRUcyfP58HH3yQ66+/njFjxqBzGs7Y1dVFTk4OL7zwAk899RSbNm2irc192w4IIYRWJKkphOgjdbKjr2ah9NXsQ1F0fZage2VfTaORgAkT7NtSrSnc3hDpqykGX4R/BEODbMP1uqxdFDYWnvb6oVEBXP2zyYyaHmffV3W0mf/+YTuVhd7ZU1AMPL+EEKJuGEvcTyYTNG0I6B0JwZ7qDhrePUTlX7fTsr4Mq9l9h88oisKUlEgevz6TTb+4iJ8sGEVcqOODY4tV5bO9FVz33BYW/XM9b2wtps3sne1F9Ho9o0eP5nvf+x4/+clPWLBgAVG9QxuPq6mp4YsvvuCxxx7j7bff5tChQ/bJ6kII4etk+bkQoo/hkyaDooCqUn4wn47WFgKCQ858oo/Q6wPsyUyLpRO9PvAMZ3iewMlZtG/dCtj6aoYuXqxxREKcRpxMQBfaSI9Kp6KtArD11RwdOfq01zf46Zn/g3RiEkPY9N5hVBXam7v44LEc5lw/mvRZwwYjbOEBEv40+7THjTGBRFyVRuj8ZFo2ltG2pQK1N4lpaeqi6bNCmr8uITh7KMEXDEMf7Hfa29NSbIg/D1ycxt1zR/Dl/ipe2VTE1qP19uMHq1r4fx/s408rDrB0SgI3zUgmNSb4NLfouYKDg5k5cyYXXHABpaWl5Obmsm/fPrq7bZXgVquV/Px88vPzCQkJYdKkSdoGLIQQbkAqNYUQfQSGhTN05CgAVNVK0a6dGkfkXvouQfe+Sk2AgCxHX832XKnUFG5uSIbjsiw/F4PoTMOCTkZRFCbNT+KyByZhCrLVFlgtKmteP8Da/xzE4sZLh4X70Yf6EX7pcIY+Mo3QRSnogo32Y2pHDy1fl1L55+00fnyEngb3fs1i1OtYnDGUt+/MZtWPL+SG6UkE+untx1vMPby0sYiLHlvHTS9s5cv9VVis3rk0XVEUkpKS+O53v8tPf/pTLr/8chISEvpcp6WlhfXr19u3S0tLBztMIYRwC1KpKYT4ltSsaVQUHARsS9DHzpqrbUBuxHlYkMXincOCAiZNBJ0OrFbM+Qeo/ue/MERGEpQ9A1NamtbhCdFXZCoYAqCnA1oroa0WgqK1jkr4gLGRjr6a++vPLql5XOLYSK79xVRWPLOXumO2ISF535RRX9bKwjvGExRmOsMtCOGgCzAQOjeRkJnxtOVU0bLuGJZ6WxJT7bbSuqmc1i3lBE6MJWROAsYhQRpHfHqjh4Tw+ysz+PmlY3hv5zFe21xMYa2jn+T6glrWF9QSHx7ATdnJXDclkYgg961GPR8mk4msrCyysrJOOwzohRde+NY+GR4khPAFktQUQnxLatZUNr79GgBHd+3AarGg0+vPcJZv0OmckppeOgG9c+9eFH9/1PZ2UFXqnnnGfixwyhSi770HQ3Q0bZu3YG1rRRcULAlPoR2dHuLSoay3qrxyL4yYp21Mwif0GRZUf4geaw8G3dm/tA6NtvXZ/Pq1fA7vqAag4kgT7/xxB5femUHc8NABj1l4N8WoI3j6UIKmDKFjXy0t60rpLu9NBlqhPbea9txq/MdEEjI3AVNKmLYBn0Gov5FbZw7nluwUNh6p5ZVNxXx9oIrjBZpljR386fMD/P3LQ1w+cRg3ZyczISFc05iFEEIMLklqCiG+JSZ5OMFR0bTW1WJua6P8YD4J6ePPfKIP0OudBgVZ3HspV380vvsuFb/6NZyiAX37jh2U3HrbSY8dT3gGZWe7MkQhvi1unCOpWbVPkppiUEQHRBMbGEt1ezWdlk6ONh0lLeLcPtwxmvRccvs4YhJD2PLhEVQV2hrNtj6b3x/F2Aukz6Y4d4peIXBiDAETojEXNNKyrhTzEcdAqs4D9XQeqMcvOZSQOQn4j4lEcZpC7m50OoXZaTHMTouhtL6dN7aW8Pb2Ehrabb0mu3qsvLvzGO/uPMakxHBuzk5myYShmAze9YH8ySovT1WNGRwcTG5uLhMnTuwzVV0IIbyNPMIJIb5FURRGZE21b7/9m0d47LrvaBiR+9DrvHf5edvmzadNaJ5J+44dlNy+jMb33sNcUED9q69R+/TT1L/6GuaCggGOVggncdJXU2ijP301T6QoClkLk
/nOfRMxBdrqDSw9Vr5+9QDfvHUIi0X6bIr+URQF/1ERxPxwArH3TiJgXBQ45S67ipupe3U/Vf/MoW1nFaoH/F9LjAzkkUvHsPkXF/O3ayYyIaFvtemu0kZ+8t/dXPDHr/nLygOUNXrXa7VT+cEPfsCQIUPs262trXz00Uc8//zzFBcXaxiZEEK4llRqCiFOKjVrGru//FzrMNyOzrlS08sGBdU++VS/E5p2VisV/+9/T3rImJhI1J13ELF06fndhxAnGiIT0IU20iPTWVu6FoD8+ny+y3f7fVtJ46K45hdTWPH0Xup7lwzvXXuMurJWFv5wPIGh3tkzUAwOv8QQom5Kp7umnZZ1x2jPrQaLbR13T3U7De8covmLYoJnxxM0bQg6P/eucvQ36lk6OYGlkxPYVdrIq5uK+HRPBV29idm6ti6eWnuEZ9YdYUF6HDdnp3DBiCgUxX0rUs9HSkoKd9xxB7t372b16tW0ttp69VZUVPDSSy8xduxYFixYQGRkpMaRCiHEwJJKTSHESSWOn4DBTwYVnEincyQ1valS01xQQPuOHS69j+7SUir/95ccumAmDe++69L7Ej4mbpzjcs0B6OnSLhbhUwaiUtNZWEwgVz88mRGZMfZ95QWNvPPH7VQXN5/37QthjAkkcukohj48leAL41GckpeWJjNNnxZS+adtNH1ZjKWtW8NIz96kxHD+ft0kNv3iIn62cDTDwpw+gFZhVV4VNyzfyoJ/fMOrm4to6fSM7+tc6XQ6MjMzuf/++5k9ezYGg6N+KT8/nyeffJIvv/ySzk7v+lBeCOHbJKkphDgpo5+JpPETtA7D7ThPP/emSs22zVsG7b4s9fVU/u8vObLoUto2bx60+xVezD8MwpNsl63dUHtI23iEz3BOah6oP4DFajnv2/TzN7DwjvFM/26qfalwa4OZ9/+Ww8EtFed9+0IA6MNMhC9OZegjUwldmIwuyGg/Zm3voWV1CZV/2kbjx0foafSM1zvRwSbunTeSbx6ex7M3TWbmyKg+xw9Xt/Krj/KY8YfV/OqjfRyubtEoUtcymUxcfPHF3HfffYwf71jJYLFY2LhxI48//jg7duzAer6rc4QQwg1IUlMIcUqpWdO0DsHteGtPTWtb66DfZ1dRESW33U7je+8N+n0LLxTnvARd+mqKwRETGEN0QDQAHT0dFDcPTO86RVGYcmkKS+6ZgF9Ab5/NbitfvZzPhncKsHpA70PhGXSBRkLnJTH0kamEXzECfaSjylHtttK6qZzKv+yg/r8H6a5q0zDSs2fQ61g4bghvLJvBVz+5kFuykwk2OaoW27osvLq5mPl//4bvP7+Flfsq6PHQv6lHH330lMOCwsPDWbp0Kbfffjvx8fH2/e3t7Xz66ac888wzFBYWDlKkQgjhGtJTUwjRx+kGAp3s2ENvf+rKcNyOc09NixdVauqCgrW5Y1Wl4pe/wjhsmExNF+cnbjwcXGG7XLkXJn5P23iEz0iPSuebY98AkFeXR2p46oDddkpGNNc8MoUVT++hobIdgN2rS6k91srCH44jIFj6bIqBoRj1BM8YRtDUoXTsq6Fl7TG6K3qTmFaV9pxq2nOq8R8bScicBEwpYae/QTcxMjaE33x3PD9bNIYPco7xyuZiDlc7PsjddKSOTUfqGBrmzw3Tk/jetCSig72r/VJiYiK33347+/bt46uvvqK52dbKorq6mldffZVRo0ZxySWXEB0drXGkQghx7qRSUwghzoHeqaem1eI9Sc2g7Bna3bnVSu1TT2t3/8I7yLAgoZGxkWPtl/Pr8wf89sPjAln68ykMn+hIOJQdbOCdP+ygptQ7l88K7Sh6hcCJscQ+kEn0reMwpfZNXnbm11PzzB6qn9lNx4F6VFXVKNJzE2wycFN2Cl8+eCH/+eF0Fo0bgl7nGBpU0dTJ3744xAV//JoH395FTkmDx3xvZ0On0zFhwgTuu+8+5s2bh9HoaDdw6NAhnnrqKVauXElHh/esQhJC+Aap1BRC9HFi5eXKp/5B3rrVAMy75YdkLe7/ZFdvoHPqqWmxes8LP1NaGoFTprh8WNCptG/fjrmgAFNamib3L7yA8/LzY9tg3V/BFAKpcyB27KnPE+I8DfSwoJPxCzBw6Z0Z7Pi8iG2fHAWgpb6T9/+yk4tuHkva1DiX3K/wXYqi4D86Ev/RkZhLmmlZe4zO/XX2411FzdS9nIchLpCQuYkETohG0bt/vYyiKFwwIpoLRkRT3tjBf7aW8Oa2EurabAPmuixWPsgt44PcMjLiw7gpO5nLJw7D3+je0+DPlp+fH3PmzCEzM5Ovv/6aXbt2AWC1WtmyZQu7d+9m7ty5TJkyBb3eO75nIYR3c/9nHiGEpmJTHMvoqouk7463VmoCRN97D+i0e1oYzGFFwgs1lIDS+/+3qw3W/A5W/hyemgEvLYbCtZqG10d1Pmx5xpZ43fKMbVt4rBOHBVlV1/TmU3QKU5cMZ/HdGRj9bcmGnm4rX7yQx6b3DmO1ek9VmXAvpqRQom9OJ+4nkwmcHAd6R4VjT1U7DW8fpPKvO2jdWIa16/yHZQ2WYeEB/HThaDb94iL+ed0kMpPC+xzfW9bEw+/uYcYfV/PHFfmU1rdrE6gLhIaGcsUVV3DHHXeQlJRk39/R0cHnn3/O008/zaFDh7yqWlUI4Z0kqSmEOK3YlBH2y9VHj2gYiXvQe2mlJkBQdjZDf/sbzRKbWgwrEl4i51V44yo4VTKpeCO8diXkvDa4cZ2ocK0twfrUDFvC1Z0Tr+KsxQXGEekfCUBbdxslzSUuvb/hE2O45pEphMcF2vflflnCp0/sorOt26X3LXybMTaQyGtGMeThqQTPikfxc7xesDSaafykkMo/b6P5q2IsHvR/0WTQc0VmPB/cM5NP7pvFNZMTMBkc31tjezfPflPIhX9dw+0vb2fdoRqv+RBh2LBh3HrrrVx77bWEh4fb99fW1vKf//yH119/nerqau0CFEKIM5CkphDitGKcKjXrykrp6facF6muoHOq1PSm6efHhS9dStILywmcOnXQ71uzYUXCsxWuhU9+dOqE5nGqFT55QLvEYc6rtsRq8caTH3eXxKs4Z4qiMDbKtX01TxQxJIilj0whJSPKvq80v4F3/ridujL5gEi4liHMRPh3Uhn6yDRCFySjC3J0NLO29dD8VQmVf95G4ydH6Gk0axjpuctICOOv10xkyy8u5heXjiEhwvFhtqrC6gPV3PLiNi7++zpe2HCUpg7Pf12sKArp6ence++9zJ8/Hz8/xwCyI0eO8PTTT/Ppp5/S1tamYZRCCHFy0lNTCHFapkBHJYjVYqGutJi41JEaRqQtvdP0c6vVs16on62g7GyCsrMxFxTQtnkL1rZWdEHBBGXPoKe2ltqnnqZ9+3YX3K+Gw4qE51r3lzMnNI9TrbYl36lzXRrSt5xr4jU8cfBjFOclPTKdjWW2hPX+uv1cOvxSl9+nKcDA4rsnsO3To+xYUQRAc20n7/5lJxffPJaRk2NdHoPwbbpAI6EXJxE8O572HVW0fHMMS28SU+2y0rqx
nNbNFQROiiFkTgLGuCCNIz57EUF+3DlnBMtmp7L2YDWvbC7mm0M19uNHa9v4v0/387dVB7kyK56bs5MZMyRUw4jPn9FoZNasWUyaNIk1a9aQk5ODqqqoqsqOHTvYu3cv8fHxJCQkaB2qEELYSVJTCHFGo6bP5NBW25u1qqNHfDqpqdM5PrG3emGlpjNTWtq3BveY0tJOmfDs2L2bil/+ylbKcI4Cp06VIUHi3FXnn7ry8VSKN8CH90B4EhgDwBho+/Lr/dcYAMYg27/2fb37df0cmuAJiVdxXpz7aubXDV6PVEWnMP3yVKITg1n9cj7dZgs9Zgurnt9HTWky0y9PRec04VkIV9D56Qm+YBhB04fQsaeWlnWldFf29p+0qrTnVNOeU41/ehQhcxIwJXtO8k+vU7h4bBwXj42jsKaV17eU8M7OUlo6ewDo6Lbwn60l/GdrCdOGR3JzdjILxw3B6AFDk04lODiYyy67jKlTp7Jq1SqOHrUNJzObzRQWFlJeXs7QoUMZM2YMiiKPL0IIbUlSUwhxRrHDR9iTmr4+LMibe2qei1MlPI3x8VT+5rd0FRWd/Y3pdETfc/fABih8Q+G6/p23643+nac32ROd07rBojdBYVzf5KgxAPyCHPvMLf1LvFbny9R2D+K8/Hx//X5UVR3UN/sjMmMJjwvk86f30lRje27KWVlMbWkrl9yejinQOGixCN+l6HUEZsYSMCmGzoMNtKwtpauo2X68c38dnfvr8BseSsicRPxHR3hUUiw1JphfXZbOTxeO4sPccl7dXMSByhb78W1H69l2tJ64UBPXT0vi+9OSiA31P80turchQ4Zw8803c+jQIb744gvq6uoA6Ozs5O233yYlJYWFCxcydOhQjSMVQvgySWoKIc6o7wR03x4WpNM799T0runnAyEoO5sRKz+n8d13qf77P7DU15/xnKg77iAoO3sQohNex9xy5usMJIsZOszQ0YC9MUeriz7oKVwnSU0PMixoGGGmMJrMTbR0tXCs5RiJoYmDGkPUsGCWPjKFL1/MoyTP9thbklfHO3/aweK7JhA5zHOW/grPpigKAWMiCRgTibm4mZa1pXTmO14PdB1tpu5oHsYhQYTMTSAgIwZF7znJzUA/A9+fnsT10xLZXtTAK5uLWLWvkp7e4UFVzWb++VUB//76MIvGD+GWC1KYkuxZCdzjFEVh9OjRjBgxgu3bt7N69Wp6emxVqkVFRTz77LNkZmZy0UUXERISonG0QghfJElNIcQZxQ53TECvKT6K1WpB199lmB5O77z83IcrNc8kfOlSwpcupfHdd6l97nm6S049Dbj58xVE/3AZuiB5wy3Okamfb6BGLYIhE6C7vferA7rabP92d0B37+Uup+Pd7cAgTrsd7IStOC+KojA2cixbKrYAtmrNwU5qAvgHGVly70S2flRIzqpiAJqqO3j3zzuY/4N0UjNjBj0m4dtMyaGYbhlHd1UbLeuO0b6rBnqTf92VbdS/dRD9qiJCLkwgcHIcOj/PeX2pKArThkcybXgkVc2dtmXo20qoabH1Fe2xqny6p4JP91QwdmgoN2cn891Jwwj087y34AaDgezsbFpbWykqKqK8vBy1t91Qbm4ueXl5zJo1i+zsbIxGqQwXQgwez3tEFUIMuqDwCILCI2hrbKDHbKahopyo+MF/s+YO+k4/l0rNMzme3DyxB6df6nDKfvRjrG1tdBeXUPnHPzLsd7/TOlzhaVLn9O+8+Y+eexWkqkJPpz3RuW3jOnTWTqZMSHdKfjonSXsvl2499+Xn0P+ErdBMelS6I6lZt5+FKQs1iUOnU8i+cgQxSSGsfmU/PV1Wus0WPn92L1OWpDBtyXAU6bMpBpkxLojIa0cTekkyrevLaNtWidpt6zVsaTDT+NERmr8qJviCeIKzh6LzsJYJcaH+PLhgFPfOG8mqvEpe3VzE9qIG+/H8imZ+8f5e/rgin2umJHLTjGRSoj3vw1yj0UhaWhpXXHEFX3zxBQUFBQB0dXXx9ddfs3PnThYsWMC4ceM8sjJVCOF5JKkphDgrsSmpHN21E7D11fTVpGbf6eeS1DxbJ+vBOeRXv6T8548A0PTuewRfeCGhl1yiRXjCU8WOheSZ55Y0TJ7Vv2XditLbJzMAiKI9qHf6a/IFpz+vOh+emnHu99ffhK3QjHNfzcEcFnQqIyf39tl8Zg/Ntbbnqx2fFVFb2sr8W9MxBcjbADH4DOH+hF82gpCLkmjbXE7rpnKs7bblzNa2Hpq/LKZl3TGCpg0heHY8hjCTxhGfGz+DjssmDuOyicPYX97Ma1uK+DC3nI5uCwDNnT28sOEoL2w4ytzRMdycncycUbHoPeyDhpiYGG644QYOHz7MqlWrqKmxTYZvamri3XffZevWrSxcuFAmpQshXM5zx7IJIQaV8xL06qO+21ezz6AgL59+7mqhl19O6OJL7duVv/wV3VVVGkYkPNKch0E5y5czig7m/My18ZzoeOL1XPQ38So0NS5ynP3y8WFBWotOCOaaR6aSODbCvq9oTy3v/mkHDZVtGkYmfJ0+yEjo/GSGPDKN8MtS0Yc7kpdql4XWDWVU/mU79e8coru6XcNI+y99WCh/vGoCW35xMf+7ZCwpUYF9jq89WMNtL+9g3t/W8tw3R2hs79Io0v4bOXIkd911F0uWLCEw0PH9lZaWsnz5ct5//32ampo0jFAI4e0kqXkaiqL8j6IoqqIo/9Y6FiG01ndYkO9OQHdefm61dqKqVg2j8WyKojDk17/G0Ds109LURMUvfoFqlZ+pOAepc+Gyf505sano4LLHbdcfbO6eeBUDIiEkgRCjrW1Ak7mJirYKjSOy8Q828p37JjJpQZJ9X2NVO+/8aQdH99RqGJkQoPPTEzwzniE/m0LEtaMwxDkl/iwq7TurqPrHTmpf24+5pPnUN+TGwgKNLJudytcPzeXlW6dy0ZhYnFdml9S384cVB5j+h9U8/O5u9pV5VhJQr9czdepU7r//frKzs9HpHM93e/bs4YknnmDNmjV0dXle0lYI4f4kqXkKiqLMAH4I7NE6FiHcQWyKU6VmUaFbVKBoQVF06HSOagKr1axhNJ5PHxbGsD//ieOv7ts2bab+lVc1jkp4nKyb4aYPbBWOJ5M8y3Y866bBjes4T0i8ivOmKEqfJej76/ZrGE1fOr2OmVePZMHt6RiMtv+H3Z0WVjy1h+2fHUW1+uZzunAfil5HUFYccT/KIuqWdPxSQh0HVejMq6Pmqd3UPLeHzoP1Hvk6VKdTmDs6lhd/MJV1P53HHRemEhbg6B1q7rHy3x3H+M4TG7j66U18tKuMrh7P+aA3ICCAhQsXcu+99zJmzBj7/p6eHtatW8cTTzzBrl27sMqH10KIASTNdE5CUZQw4A3gduBXGocjhFsIi43DLyCQro52OluaaamrJTTaN6eo6nT+9mSmxdLRZ0m6OHdB06YRtWwZdc8/D0DN3/9OUPYM/J1eEAtxRqlzbV/V+VC4zjY93BRi603pDku5s26G8CRY91co3vDt48mzbBWaktD0aOl
R6Wyr3AbYkprzk+drHFFfo6YOISIuiM+f2UtLva3P5rZPjlJT0sL8W9Px85e3BkJbik4hYGwUAWOjMBc10bL2GJ0H6u3HzYVNmAubMA4NImROAgEZMSh6z+pHCZAUFcj/LB7Lg/NH8cnucl7ZXEReuaMSdWdxAzuLG/i/4Hyun5bI96cnMTTMM15vRkVF8b3vfY+jR4+yatUqKisrAWhpaeHDDz9k27ZtLFy4kOTkZI0jFUJ4A4+r1FQUZamiKE8oirJeUZTm3uXhr5/hnARFUV5UFKVcURSzoihFiqL8U1GUiFOc8hzwrqqqXw/8dyCEZ1J0OlmC3ss5iSnDggZGzP334T/O1o9O7e6m7Kc/xdopP1vRD7FjYcZdtgThjLvcI6F5XOpcuPUzuGcLXHC/Y39Ykm2/JDQ93thIp0rNevep1HQWkxTCNf8zhfjR4fZ9R3fX8u6fd9JY5ehd+ORdX/PkXfJSWGjHlBJG9A/GEffjLAIzY/u8c+2uaKP+rYNUPraD1i3lqL2DeDxNgJ+ea6cm8un9s3jv7gv47qRhGJ2StLWtZp74+jCz/ryGu1/fyeYjdR5TpTp8+HDuuOMOLr/8coKCHJPey8vLeemll/jvf/9LQ0PDaW5BCCHOzOOSmsD/AvcBk4CyM11ZUZQRwE7gVmAb8A+gEPgRsFlRlKgTrv9DYCTwywGNWggv0Cep6cPDgpz7aloskngbCIqfH8P++leUAFvCuOvwEar/9pjGUQnhIrFjYe4vHNvNZdAjvca8QXpUuv1yfl2+2yYfAoL9uPyBSUy8KNG+r6GijXf+tIPifXUaRibEtxmHBBF53WiG/GwqwRcMQzE63sJa6jtp/PAIFX/eTvOaEqwdPRpG2n+KojA5OYJ/fS+TTY9czEMLRjEk1On1plXl832VXP/8Fhb+8xte21JMm9n9v1edTkdWVhYPPPAAs2fPRq/X24/t37+ff//733z11Vd0ygfZQoh+8sSk5oPAKCAUuPssrv8UEAs8oKrqFaqqPqKq6kXYkpujgd8fv6KiKKOBPwA3qKoq7y6EOEGfCehSqQmA1SoT0AeKKXU4cY88Yt9ueP11Wr/5RsOIhHAhvyAI600oqRao993HVG+SFJpEkNFWkVTfWU9Ve5XGEZ2aTq9j1rVpzP/BWPQG21uCro4ePn1yNztXFmkbnBAnYYjwJ/zyEQx5ZBohFyehC3S0S7C2dtO8qpiKP26jcUUhlmbP7XkeE2Li/ovT2PDzeTx9QxYzUiP7HD9U1covP9zHjD+s5tGP8zhS06pRpGfPZDJx8cUXc9999zGud2UOgMViYcOGDTzxxBPs3LlT+m0KIc6ZxyU1VVVdo6pqgXoWH30ripIKXAIUAU+ecPjXQBtwk6Iox+vhs4FoYJ+iKD2KovQAc4B7erdNCOHD+i4/991KTX2fSk1Jag6k8GuvIfjii+3b5f/z/+ipk6oh4aWiRzku1x7ULg4xYHSKjjGRjn7A7jQs6FRGzxjKVT/LIjii92WuCls+lCS7cF/6ICNhC5IZ8vNphH0nFX2Y4y2a2mWh9ZsyKv68nfp3D9Fd036aW3JvBr2OSzOG8tYd2Xzx4IXcOCOJQD9HpWOLuYeXNxVx8WPruOmFrXyRV4nFzYd+RUREcM0113DbbbcRHx9v39/W1sYnn3zCs88+S2GhPP4IIc6e4q7LYs6GoihzgTXAG6qq3niS48uA54HnVFW98yTHV2FLes5XVXW1oijhQMIJV3sJKMBWwZl3pmSqoig7T3FoTFpaWuBzzz132u/JE7W0tAAQEhKicSTC1VSLhdwXHke12PoWTbz1Xgz+ntG0fCBZrH8FDgCgU36Coow7/Qku4q1/e0pLC1H/9zv0zbaG+eaMDBrvuds+IV0IdzAQf38jDi8n8dgnABQOv4GS5GsHJDahrffr32dNyxoAFoUtYkn4Eo0jOjs9nSqlG1Xaa/ruH/VdBWOA+zz+eutznzgPVgipUAg/qmBq7ft/VUWlLQ4aUq2YwzSKbwC1d6tsLO9hdUk3lW3fflsa5a8wL8nAhQlGQv0G/u92IP/+VFWlqqqKwsJCurr6LpKMiopixIgRBAYGnvf9COENvP2574477qCgoCBHVdXJ53qut484HN3776FTHC/AltQcBaxWVbURaHS+gqIobUC9qqr7XBSjEB5D0esJiIymvca2nK69tprQBF+cXOhctN2tWRTeSg0JofmWW4h44gkATHv3EvDNN3TMmaNxZEIMrPZARz/DoLZjGkYiBlKin+P3WtpVqmEkZy/vrVMv+Tz0kQr0TZ6M+57HLfYS3kwHLfEqLcNUAmsgolBHQKMtoaegEFwFwVV62iNVGlKtdEQB7pOnPyeBRoUFyUbmJxnYX2flq5JudlVb7H+hdZ0q7x7q5sPD3UwfYuDiZAOpYfrT3qZWFEVhyJAhxMTEUFpaSklJiX35eV1dHfX19cTHx5OcnIzRaNQ4WiGEu/L2pObxz+OaTnH8+P7wgbrDU2WWFUXZGRISkjV37tyBuiu3sXbtWgC88XsT39Z1aC97V68CYGhoMFN98Pe+d997VFfvBmBs+giGxM3VJA6v/tubO5fKpkYaXn0NgLAPPmTSjTdiGjHiDCcKMTgG5O+v2A8OPQVAnL6ROG/8W/ZBSY1JvPrRqwBUU+0Rj9F5b53blHMtvyevfu4TA0JVVbqKmmlZd4zOA/X2/YH1CoH1eozDggiZk0hARjSKzkOzm8A84F7gWEM7b2wt4e3tpdS32Soee6ywsbyHjeU9TEwI4+bsFJZMGIq/8fwSnK78+2tubmb16tXs3m17ja2qKseOHaOuro558+YxefLkPoOGhPAl3v7cdz4VqN6e1DyT489ip1xSrqrq3MEJRQjPEJsiw4KsFkd1Zm3NaoKDRhMcPOo0Z4j+iH3oIdo3b8FcUIDa2UnZz37G8LfeQvHz0zo0IQZGn56aBWC1gk4q4DxdcmgyAYYAOno6qOmooaa9hpjAGK3DOq17n7noW/uevKtvonPWNWlMvDjxW9cTwt0oioJpeBim4WF0VbTRuq6U9j010FuQ3F3eRv2bB9B/4U/IhQkEZcX1majuaRIiAvn5ojH86OI0PttTwatbitld2mg/vvtYEw+9s5vfr8jnuqmJ3DozhdgQ/1PfoEZCQ0O58sormTZtGitXrqS01Fbp3tHRwYoVK9i2bRsLFy4kLS1N40iFEO7Ecx+9z87xSsxTdVAJPeF6Qogz6DMs6KhvDQuqr9/Izpzrqa370r6vqvoTtm67lJ0511Nfv1HD6LyPzmRi2N/+Zk9imvfnU/P44xpHJcQACoqGgN6ptt3t0CxL0L2BXqfvMywovz5fw2gGzsb3DlOaX3/mKwrhRvyGBhH5vTEM+elUgrKH9kleWuo6afzgMBV/3kbz2lKsnT0nvY1jj6zn2CPrByvkfvM36rl6cgIf3TuTD++dyVVZ8fgZHN9vfVsXT689wpy/rOVvqw7S3OmeLZTi4+O57bbbWLp0KWFhjrfxtbW1vPHGG7z++utUV1drGKEQwp14e1Lz+CjRU5
VQHf+Y51Q9N4UQJ4hJTkFRbA8d9RVldHd2ahzR4Cgv/y+5u35AY+O2kx5vbNxG7q4fUF7+ziBH5t38R48i9qGf2LfrXniRti1bNYxIiAEWM9pxuUZejniLsZFj7Zfz6vI0jOT8xQ231QCoVpVVz++jsdpzp0kL32WI9CfiuyMZ8vOphFyUiBLgWLBobe2meWURFX/cRuPnR7E0d53mljzDpMRw/n7tJDY/chEPLxpNfLhjsGdHt4V/rznMnL+sYfn6Qjq7LRpGenKKojB+/Hjuu+8+Lr74YvycVukcPnyYp59+ms8++4y2tjYNoxRCuANvT2qu6f33EuV4FqaXoighwEygA9gy2IEJ4amMJn8ihsXbNlSVmpKj2gY0COrrN5J/4P9hX7d0SlbyD/yPVGwOsIibbiJo5kzbhqpS/vOfY2ls1DQmIQZMnyXoktT0FulR6fbL++v2axjJ+bv0rgyCwnor5tt7WPH0Xro6Tl7RJoS70wf7EXZJCkMfmUbYklT0oY5kmWq20LruGBV/3kbD+wV013ZoGOnAiAo2cc/ckXzz8DyeuXEyY4Y4+tY1tHfzu8/yuehva/nvjlIs1lN2ZNOM0Whk9uzZ3H///WRlZdn3q6rK9u3befzxx9m0aRM9PfKYJISv8uqkpqqqR4AvgBRsfZSd/QYIAl5VVVU+4hHiHPRdgu79fTWPFv2bMyc0j7NytOhJV4bjcxSdjqF//AP68HAAeqqqqHj0N6iq+734FuKcOVdq1h489fWERxkb5ajUzK/z7OXnQWEmLr17AvreZawNFW18+dJ+VDdMgAhxtnQmPSGz4xny8FQilo7CEOOoZMSi0ratkqrHdlD3hmf//R6n1yksGj+EFQ/M5p/XTSIx0vH9ljd18vC7e1j0z2/4Iq/SLV9fhYSEcPnll3PXXXeRkpJi3282m/niiy946qmnOHDggFvGLoRwLY9LaiqKcoWiKC8rivIy8Ejv7uzj+xRF+dsJp9wDVAOPK4ryoaIof1QU5WvgQWzLzv+fC2MNURRlmKIowwCj1Xq2SREh3FvscOdhQd7dV7O19dApl5yfSmPjVlpbpeJqIBljYxn6u/+zb7esXEnThx9pGJEQAyRalp97o9SwVPz1tkEcVe1V1HXUaRzRubv3mYvsA4TiUkKZd5OjT2jRnlq2fuL9H2oK76cYdARNiSPuwclE3TQWv0SnCbwqdOyttW92V3l+HYxOp3BFZjyrfzKXRy9LJyrIUalaUN3KHa/tZOkzm9l21D375w4ZMoRbbrmF733ve0RGRtr319fX89Zbb/HKK69QWVmpYYRCiMHmcUlNYBJwS+/Xwt59qU77ljpfubdacwrwMjAdeAgYATwOZKuq6spXmQ8BZb1fGVVVVS68KyEGT59KTS+fgN7QsGlQzxOnFjJ/PuHXXGPfrvq//6OrpETDiIQYANFOU1ylUtNrGHQGRkU6Wgt4w7Cg0dOHMGlBkn175+fFFOyQ17bCOyg6hYBx0XSVtpzyOlX/yLEPDfKU4UGn4mfQ8YOZw1n38Dx+PD+NID+9/djO4gaufXYzt728nfyKZg2jPDlFURgzZgz33HMPCxcuxGQy2Y8VFRXxzDPP8PHHH9Pa2qphlEKIweJxSU1VVR9VVVU5zVfKSc4pVVX1VlVVh6qq6qeqarKqqj9SVdXVH0E9BsT3fu2Ni4tz8d0JMTicKzVrS4qweHEfm56e/r0gqm+QVr2uEPeLR/DrXXZkbW+n/GcPo3rx/z/hA8ISwRhou9xeB22eV9EnTs55WJCn99U8LvvKESSNc1RHff1KPjUlp04CCSHcW7DJwI/nj+Kbh+dx68wUjHrFfuzrA9Usfnw9D769i9J69xsQZjAYyM7O5oEHHmDq1KkoiiP2nJwcHn/8cdavX093t3tOeRdCDAyPS2p6ElVVW1RVLVdVtRzo1unkxy28Q0BwCCHRMQBYenqoLyvVOCLXMRiC+3Vebe1XMjDIBXSBgQz761/BYJta2rF7N7VPP6NxVEKcB50OokY6tqVa02uMixpnv+zpfTWP0+kULrl9HOFxtkR8T7eVFU/vod0LpkULAZDwp9nf+joZxU9PxNVpXtPDMSrYxK8vG8fXD83lqsx4jucHVRU+yC3josfW8ujHeTSb3e/7DQoKYsmSJdx9992MHOl4Pu3q6mL16tU8+eST5OXlec3vSgjRl2TZhBD9Epvi3FfTe5egR0Rc0M8zVRkY5CIBGeOJuf9++3bt00/TnpOrYURCnCfnYUE1ktT0Fs7DgrylUhPAFGhk8d0Z+Pnblqu2NphZ+dxeLD3SO154t5CLk+zvntUuCw3vFVD3yn4sLd6T1E+MDOTv101ixQOzuXhMrH1/t0Xl5U1FPPxNOx8UdNFqdr9VMrGxsdx4443ccMMNREdH2/c3Njbyzjvv8NJLL1FWVqZhhEIIV5CkphCiX/pOQPfeYUHBwaMID5/Wr3NlYJDrRC27ncApU2wbVivlDz+MRXonCU/lPCyotkC7OMSAGhE+AqPOCEB5WzmNnY3aBjSAIoYEseD2cdBbzVVxuIlv3j4klVDCq4UtSCb27kl9JqV3Hqin6h87ad9bo2FkA2/s0FBe+MFU/ntnNpOTI+z7Oy3w0ZFu5vxlDS9tPIq5x6JhlCeXlpbG3XffzeLFiwkIcPyuSkpKeP755/nggw9obna/XqFCiP6RpKYQol/6TkD33kpNgOEp92F/53aOZGCQayh6PcP+/Cd0IbYppd3HjlH1u98DYC4ooP7V16h9+mnqX30Nc4EkiYSbi3EMlJHl597DqDMyKsLxu91f7z3VmgApGdFkX+F4LbB/fTn71kkVlPBufokhxD2QSfDMYfZ91vYe6t84QN1bB7C2e1f/xmnDI3n3rmyev3kKo+IcLZnq2rr4zSf7ufixdXyQewyr1b0+0NDr9UybNo0HHniAGTNm4NwGbvfu3TzxxBOsXbuWrq5vV9k++uijPProo4MYrRDifEhSUwjRLydOQFet3rvsLDJyJjHR8/t1bkuLd72JdSfG+HiG/PrX9u2mDz/kyJIlFF52OVV/+AM1/3qcqj/8gcLLLqf4xpto27xZw2iFOI1op6RmjVR3e5P0qHT7ZW9agn5c5iVJjJrmGIS54b8FlB1s0DAiIVxPMeoJv2wE0csy0Ic7Jm937Kqh6p85dB7yrr8BRVFYkB7H5z+6kNvH+xHp7/ig/1hDBw++vZvFj6/n6wNVbletHRAQwKJFi7j33nsZPdqxKqK7u5u1a9fyxBNPsHv3bqxe/D5GCG8nSU0hRL+EREXjHxIKQFdHO03VVRpH5FqBgalnvtJJVFS+T3n5OwMcjTgu7DtLCL38Mvt215GTVw2379hBye3LaHzvvcEKTYizFzkCFFt/QppKoKtN23jEgHFOanrLsCBniqIw78YxxCbbquatVpWVz++jubZD48iEGDinGhjkPzKcuB9nETjZkdi3NHdR++I+Gj48jLXL/ZZmnw+9TmF2gpE/zQ7gf5eMJSLQaD92oLKF217ewXXPbmFnsfsldaOiorj++uu5+eabi
Ytz/L5aWlr44IMPWL58OSUlJRpGKIToL0lqupCiKCGKogxTFGUYYJRPgIQ3URTlhGpN7+2radO/5eegkn/gf2QSuguFLlx4dle0Wqn45a+kYlO4H4MfRA53bEtfTa/hrcOCnBn89Fx6VwYBoX4AdLZ2s+LpvXR1ut8gESEGms7fQOQ1o4i6KR1dkCPJ17algup/5WAu9r7ejX56hWWzU1n38Dzuv2gkAUa9/di2onqufnoTP3x1BwVVLRpGeXKpqanceeedXHbZZQQFBdn3l5eX8+KLL/LOO1KIIISnkaSmaz0ElPV+ZVRVeXclm/A9Jy5B92Z6fcCZr3RKVpmE7kL1L7189le2Wql96mmXxSJEv/UZFiRL0L1FWngaBp0BgGOtx2gyN2kckWsER/hz6Z0Z6Ay2DwDrylr5+pV8VDfrsyeEqwSMiyLuwSz8x0XZ9/XUdVLzzG6aVhah9nhfcUuov5GHLhnNuofncnN2MgadowDgy/1VLPznN/z0nd2UNbpX5bZOp2Py5Mncf//9zJo1C73ekZTNy8uzX3a3pfRCiJOTpKZrPQbE937tdS51F8Ib9BkW5MUT0AEMhuAzX+k0ZBK6a5gLCmjfseOczmnfvl2GBwn302dYkDxWeAs/vR9p4Wn27QP1BzSMxrWGjghjzvWO5PyR3Bp2fF6kXUBCDDJ9sB9RN44l4ppRKKbeRJkKLWtLqf73LroqvLO1SGyIP7/97nhWPzSHyyc6DVBS4d2dx5j3t7X87tP9NLR9eyiPlvz9/dmwYQMWy8nbBPzmN7+xDw2S4UFCuC9JarqQqqotqqqWq6paDnQ7T10Twhv4UqVmRMQF530bMgl94LVt3jKo5wnhMs6VmjUyAd2beHtfTWfpM4cxYV6CfXvbJ0cpzK3RMCIhBpeiKARNjiPuwSxMI8Pt+7sr26j+dy7Na0u9toI5OSqIx6/P5NP7Z3HhqBj7/q4eK8s3HOXCv6zh318X0N4lrSmEEAPHoHUAQgjPFTF0GAaTiR6zmbbGBtoaGwgKj9A6LJcIDh5FePg0Ghu39fs2enpaBzAiAWBt69/PtL/nCeEy0VKp6a3GRnp/X01nM5eOpL6ijWMHbMNCvnx5P0tjJxMVf34rHoTwJIZwf6JvG0/b5nLb8vNuK1hUmlcW0ZlfT+Q1ozBEn09rI/c1Pj6MV2+bxqYjtfx55UF2lzYC0GLu4W9fHOLlTcX86OKRfG9aEka9tkU/J1Zf9vT08Lvf/a7PvujoaL7//e8TGRk5iJEJIc6WlA4KIfpNp9MTk+wYbuHtS9CHp9zH+Txsnu8SdvFtuqD+/Uz7e54QLhPtWKJM3RGwSCWLt3Cu1Nxf7/1JTZ1ex8Jl4wmN9gegx2xhxdN76Gh1r6WnQriaolMInhlP7AOZGBND7Pu7ipup+lcOrVsqvLpv4wUjovnwngt45sYsUmMcQ3lqW8388qM85v99HR/vLsfqRpWrBsO3a75qa2tZvnw5paWlGkQkhDgTSWoKIc5LbIpTX00vX4IeGTmTsWN+T38noQ/EEnbRV1D2jEE9TwiX8Q+FkN5eZNZuaDiqbTxiwIyKHIVesfXXK24uprXL+yvF/YONLL5nAsbevoLNtZ2sen4fFov3DUsR4kyMMYHE3jWR0AXJ0DtMR+220vjhYWpfysPSZNY4QtdRFIVF44fyxY8v5E9XZTAk1N9+rLiunQfezOWyf2/gm0M1bpfgvfrqq+1DhNrb23nllVf6DBISQrgHSWoKIc5Ln76aXl6pCTBs2LVkTnoFvT7kzFd2Eh4+neDgUWe+ojgnprQ0AqdMOadzAqdOxZSWduYrCjHYnIcFSV9Nr2HSmxgR7vgAML/eu/tqHhc1LJj5tzqqVMsONrLxncMaRiSEdhS9QujFScTeOwlDXKB9v/lQA5X/yKF9V7XbJfUGkkGv43vTklj7s7n84tIxhAUY7cfyypu5+cVtfP/5rezqXaruDjIyMrj55psJCLC1Cejp6eGdd95h48aNXv27EsLTSFJTCHFe4pwmoB/aulHDSAZPZORMJmQ8ydlXbOoYnnKvK0PyadH33gNnO4hNpyP6nrtdG5AQ/eU8LEj6anoVXxoW5Cx1UgzTL3e0qdm79hj7N5RrGJEQ2vKLDybuvkyCL4y3v4xUO3uof+sg9W8ewNLWrW2ALuZv1HPnnBF887N53D13BP5Gx+u3zYV1XPHkRu5+fSdHatyjoj05OZlly5b16af55Zdf8umnn55yaroQYnBJUlMIcV6iEpPR9S7NADC3t2kYzeCxLUX/A2d+GNUxdswfiIycORhh+aSg7GyG/vY3Z05s6nQM/b/fEpSdPTiBCXGuYmRYkLfqMyzIB/pqOpt8aQojsmLt2+vePEj54UbtAhJCY4pRR/jiVGJ+OAF9pGM5dseeWqr+uZOOA/UaRjc4wgKN/HzRGNb9bB7XT0tCr3MUCny+r5JL/vENv3h/D5VNnYMe26OPPtpngFBUVBTLli0jKSnJvm/nzp385z//obNz8OMTQvQlSU0hxHkxGI1ExSfat2uKfKcPnG0p+suEh08/6fHw8OlkTnqZYcOuGeTIfE/40qUkvbCcwKlTT3o8cOpUkl5YTvjVVw9yZEKcA+dKTVl+7lV8tVITbD31Lr5lLNGJtgFtVovKymf30lIvyQDh20ypYcT9KJOgaUPs+6wt3dS9nEfD+wVYzd4/MC4u1J8/XpXBFw9eyJKMofb9FqvKm9tKmfPXNfzx83ya2rWtYA0MDOTmm28mIyPDvu/IkSO8+OKLNDY2aheYEEKSmq6kKEqIoijDFEUZBhitVmmOLrxT7HDnYUHe31fTWWTkTCZn/YfU1Ifs+0JCMpg+7XMmZ/1HKjQHUVB2NsmvvUrgTMfPPOzKK0j95GOSX3tVKjSF+4t2rtQsAOnZ5TVGR45Gp9hedh9tOkp7d7vGEQ0uo0nPpXdlEBBi66PX0dLN58/spbtLlm8K36YzGYi4Ko2oH4xDF+LoM9m2rZKqf+ViLmzSMLrBMyImmCdvyOKje2cyc2SUfb+5x8qz6wqZ/ZeveXrtETo0fMwwGAxcddVVzJkzx76vurqa5cuXU14ubTWE0IokNV3rIaCs9yujqqpK43CEcI0+w4K8fAL6qQQGOJakBAQkylAgDRmiHH2PAqdNl6FAwnMEx4J/mO1yVws0y5skbxFgCCA1zPZcqaJysMH3KnFDowJYdMd4dL3LTGtKWljzar4M3BACCBgTSdyPJxOQEW3fZ6nvpOb5PTSuKETt9o3imImJ4byxbAav3T6N8fGh9v3NnT38eeUB5v5tDW9uK6HHos3PQ1EU5s2bxxVXXIGut+1Ra2srL730EgcOHNAkJiF8nUHrALzcY8BzvZdXxsXFZZzuykJ4iseu+84pj+WtW03eutV99j309qeuDklzOp2f/bLV2qVhJEJncvSnUs2yvFF4EEWxLUE/ts22XXsQwuK1jUkMmLGRYzncaJv+vb9uP5mxmRpHNPiGpUUw+3ujWPcfW1K3YEc1UQnBTF6Uom1gQrgBfZCRyO+PoWN3DQ0fHkHt
7AEVWr8po/NgA5HXjsYvPljrMAfF7LQYZo6IZsW+Cv626iBFdbbq9qpmM794fy/Pf1PITxeO5tLxQ1CUsx3cOXAmTZpEWFgYb7/9Np2dnXR3d/PWW2+xaNEiZsyYMejxCOHLpFLThVRVbVFVtVxV1XKgW3e203mFEB5HcUpqqpLU1JTi70hqWqWBu/A0zsOCamRYkDdx7qu5v863hgU5G39hPOMudCTrt3xUSNGeWg0jEsJ9KIpC4KRYhjyYhWlUhH1/T1U71U/uonl1CarFN6qbdTqF70wYxpc/mcPvrhhPTIjJfqywto173sjhiic3sumwNo8fw4cP5/bbbyc8PNy+b+XKlaxYsQJpOyfE4JFKTSHEOTtZ5aVz9eZdz75GUHjEt67jzXSKVGq6C52/40Wv2mnWMBIh+sF5WJBMQPcqktR0mH1tGg0VbZQXNIIKX7yYx9KfTyFyaJDWoQnhFvRhJqJvHUfb1kqaPutdfm5Vaf6ymI4D9UReOwpjTKDWYQ4Ko17HjTOSuSornpc2FvHM2iO09A5R2n2sie8v38rstGh+vmgM4+PDBjW2mJgYli1bxltvvcWxY8cA2LZtG42NjVx99dWYTKYz3IIQ4nxJ6aAQYsC11PlexUWf5eeqJDW1pDgvP++SpKbwMDGS1PRWYyLHoGBbJlnYVEhHT4fGEWlHb9Cx6I7xhETaHq+7Oy2seGoPnW3aTjgWwp0oikLwjKHE/SgLv2RHf8nu0haqH8+ldWMZqtU3qjYBAv0M3DtvJN88PI87LkzFz+BIZawvqOU7T2zgvv/kUFTbNqhxBQcHc8stt5Ce7vjg6tChQ7z00ks0NzcPaixC+CJJagohBlxLvY8nNaVSU1POlZpWqdQUnibaabBVje8Nk/FmgcZAUsJSALCqVg41+HbSOiDEj8X3ZGDws70daarp4IsX8rBqNABECHdliA4g5s4JhC5KAb3tgxG120rjJ4XUvriPnkbfeq0TEeTH/ywey9qfzuXaKQnonFpqfrqngvl/X8f/friX6pbBa0FkNBpZunQpM2fOtO+rrKxk+fLlVFZWDlocQvgiSWoKIQZcqw9WaiqS1HQbip/z8nPpqSk8THgy6Hv/D7dVQ0eDtvGIATU2cqz9sq8vQQeITgjh4lsc1U2l++vZ9MERDSMSwj0pOoXQuYnE3peJcYijTYP5cCNV/9hJ284qVNV3qjYBhoUH8JelE1n14wu5JD3Ovr/HqvL6lhLm/GUtf1t1kObOwakA1+l0LFiwgMsuu8w+vKi5uZkXX3yRgoKCQYlBCF8kSU0hxIBrqa/TOoRBJz013YfiXKkp08+Fp9HpT6jW9O1qPm/j3Fczvy5fw0jcx8jJsUxZnGLf3v1VKQe2VGgXkBBuzG9oELH3TSJkbiK93SxQzRYa3jlE3ev5WFp97zVoWlwIz908hffvuYDpwyPt+zu6Lfx7zWEu/Msanv+mkM5uy6DEM3nyZG688UZ7P82uri7+85//sH379kG5fyF8jSQ1hRADYuHdP7Zfbqmt0S4Qjehk+rnb0DlNP5dBQcIjRTtNQK+VJejeRIYFndy07wxn+MRo+/ba1w9SebRJw4iEcF+KQUfYohRi7pqIPsrxmqczr46qf+TQked7xQUAWUkRvHXHDF66dSpjhzp6kDa2d/P7Fflc9Le1/HdHKZZB6EM6YsQIbrvtNsLCbIOLVFXls88+Y9WqVTIZXYgBJklNIcSACIl0vBlp9cVKTRkU5DacBwVJpabwSDIsyGuNiRxjv3yk8Qhmi3zwAraltfNvTSdymG1ZraXHyufP7KXNx3oFCnEuTMmhxP0oi6AZQ+37rG3d1L22n/p3DmHt7NEwOm0oisK80bF8dv8s/nndJBIjA+zHyps6efjdPSz65zd8kVfp8uX6cXFxLFu2jKFDHb+fzZs3884779DVJe8VhBgoktQUQgyI4Kgo+2UZFCTTW7XkPChINcuLRuGBnCs1Zfm5VwnxCyE5NBmAHrWHggbps3acn7+BxXdPwBRkAKC9qYsVz+ylZ5CWjArhiXR+eiKuGEn0bePRhTpei7bvrKLqnzl0HmnULjgN6XQKV2TGs/onc/nN5eOIDnb8bAqqW7njtZ1c/fQmtha6thAjJCSEW2+9ldGjHR9W5ufn88orr9Da2urS+xbCV0hS04UURQlRFGWYoijDAKOUmgtvFhLpSGq21tX6XLNymX7uPpwrNWVQkPBIsvzcq8mwoFMLiwlg0Q/Ho/SOM64uambt6wd97jWFEOfKf1QEQ36cReCkGPs+S6OZ2uf30vjJEVQf/XDAz6DjlgtSWPuzeTw4fxRBfnr7sZySRq57bgu3vrSN/Ipm18Xg58d1113HjBkz7PvKysp4/vnnqa6udtn9CuErJKnpWg8BZb1fGVVVVRqHI4Tr+AUEYgo8vmysh44W1704cEeK06AgVZafa0oxOSWYzbJ0UXigqJGg9L5EayiG7g5t4xEDSvpqnl7CmEhmXTPSvn1wayW7V5dqGJEQnkEXaCTye2OI/P4YdIEG+/7WjeVUPZ5LV2mLhtFpK9hk4Efz0/jm4XncOjMFP70jDbLmYA2LH1/Pg2/vorS+3SX3r9PpWLRoEYsXL7ZPRm9qauKFF16gsLDQJfcphK+QpKZrPQbE937tjYuL0zgcIVwr2Klas6XOt5agK4qe42MoVdWCqvrmJ+LuoO+gIKnUFB7I6A/hyb0bKtQd1jQcMbDGRkml5plkzE1g7ExHH7pN7x2mxEeHnwhxrgInxBD34GT8xzgmgffUdFD99C6avixGtfju6sGoYBO/vmwcqx+aw1VZ8SjHJ8ir8EFuGRc9tpZHP86jttU1H4pPmzaN66+/HqPRCIDZbOb1118nJyfHJfcnhC+QpKYLqaraoqpquaqq5UC3Tic/buHdQqKchwX5WlJTkSXobkIGBQmv4DwsqEaWoHsT5+XnBY0FdFukD/OJFEVhzvdGMyT1+ORg+OKFPBqrXFNFJYS30Yf4EXVLOhFXp6EcX3JthZbVJVQ/tZvuqjZtA9RYYmQgf792Ep//aDYXj4m17++2qLy8qYg5f1nDP748RKt54IctjRo1ittuu42QkBAArFYrH3/8MatXr5bJ6EL0g2TZhBADxjmp2VLnexUVktR0D30GBXXK8nPhofr01ZRhMt4kzBRGfHA8AD3WHgoa5fd7MnqjjkV3jic4wvaYbm7vYcXTe7B0SX9NIc6GoigETR1C3I+z8Bseat/fXdZK1RO5tKw/hmr17b+nMUNCeeEHU3nnrmwmJ0fY97d1WfjX6gLm/GUNL208irlnYFdgDR06lGXLluG8knP9+vW8//77dHfLB11CnAtJagohBkxwpO9WakLfvpqS1NSO4rz8XHpqCk/lXKkpw4K8jnNfzfy6fA0jcW9BYSYuvSsDvdH2lqWhsp1jW1SfT8QIcS4Mkf7E/HACYUuGg6F3vXWPStNnR6l5fi899bKqZWpKJO/elc3ym6cwKi7Yvr+urYvffLKfix9bxwe5x7AO4GNPWFgYt912GyNHOnoI79u3j1dffZW
2Nt+upBXiXEhSUwgxYPpUatbWaBiJNqRS0z3oTI5KTRkUJDxWtPPy80PaxSFcQoYFnb3Y5FAuunmMfbu1HKr32hILT971NU/e9bVWoQnhMRSdQsjsBOLuz8QY70jadR1touqfObRtr0RVffvDAkVRmJ8ex+c/upC/XTOR+PAA+7FjDR08+PZuFj++nq8PVA3Yz8pkMnH99dczZcoU+77S0lKWL19Oba3vFYgI0R+S1BRCDJgQ50FB9b69/FwmoGtHMTkvP+/0+RfpwkNFpzku1x0Gqwwf8ybpkU6VmvVSqXkmo6YOIWthkn27Nh8Oba/UMCIhPJMxLojYeyYScnGSPROgdlloeK+Aulf2Y2mR1696ncLSyQmsfmgO/7tkLBGBRvuxA5Ut3PbyDq57dgs7ixsG5v70epYsWcIll1xi39fQ0MALL7xAUVHRgNyHEN5MkppCiAET7MODgkAqNd2FYjCAwWDbUFVU6U0kPFFAOAT39tqymKGhSMtoxABznoB+sP4g3VZ5nDqT6d8dQXKG48PTr189oGE0QnguRa8jbEEysXdPwhDjqEbsPFBP1T920r7X91ZbnYy/Uc+y2al88/A8HrhoJIHHBy4B24rqufrpTfzw1R0UVLWc930pisIFF1zAddddh6H3NWxHRwevvfYae/bsOe/bF8KbSVJTCDFgThwU5GsVcn2TmrLsWUu6E6o1hfBIfYYFyRJ0bxLhH8HQoKEAdFm7KGws1Dgi96fTKSy4bRx+vfNOLN0yJViI8+GXGELcA5kEzxxm32dt76H+jQPUv3UAa7t82AIQ4m/kJ5eMZt3P5nFzdjIGnWI/9uX+Khb+8xt++s5uyho7zvu+xo4dy6233kpQUBAAFouF999/n7Vr1/rc+yohzpYkNYUQA8YvIBCjv+0T354uM51trRpHNLj6DApS5YWglpyHBVklqSk8lfOwoBoZFuRtpK/muTMFGEiapaAz9t1v6ZEEpxD9oRj1hF82guhlGejDHR8It++qoeqfOXQeGpgl1t4gJsTEb787ntUPzeHyiU6JYBXe3XmMeX9by+8+3U9D2/mt1oqPj2fZsmXExMTY961du5YPP/yQnp6e87ptIbyRJDWFEANGUZQ+fTVb63xrCbosP3cffSo1u+R3ITyU87Cg2gLt4hAuMTbSsQRdkppndnwo0OEVKieu1n/mvrX24zI8SIhz5z8ynLgfZxE4Oc6+z9LcRe2L+2j48DDWLunrfFxyVBCPX5/JZw/MYs4oR+Kxq8fK8g1HufAva3h7e8l53UdERAS33XYbqamp9n27d+/m9ddfp6Pj/CtChfAmktQUQgwo576aLT7WV7PPoCBJamrKuVJTlp8LjxXjvPxcKjW9jXOlpgwLEkJoTedvIPKaUUTdlI4uyFEO3balgup/5WAubtYwOvczblgYr9w2jTd/OIOJieH2/S3mHn7+3l5+9+l+LNb+LxkPCAjghhtuIDMz076vqKiI5cuXU19ffz6hC+FVDFoHIITwLiGRTsOC6nxrArpUaroPxd9RqWntlP6mwkM599SsOQSqCopy6usLj3LisKAeaw8Gnbw0P5V7n7kIsC3DBJg1czbP3r/Ofnz2daOYMC9Bi9CE8CoB46LwSw6h4YPDdObZXsv31HVS88xuIocr1I+U3o7OskdE8eE9F7Aqr4q/rDxAYW0bAMs3HOVobRv/uj6TYFP/Htv1ej2XX345kZGRrF69GoC6ujqWL1/O9ddfT2Ji4oB9H0J4KqnUdCFFUUIURRmmKMowwGi1Sr8f4f1CohzLz32uUlORpKa70Pk5LT83S6Wm8FAhQ8EvxHbZ3AStVdrGIwZUdEA0sYGxAHRaOilqKtI2IA9jMOr7bOesKpbhQUIMEH2wH1E3jiXimlEopt6/NRUiC3UkbtbRVdHW5/rHHlnPsUfWaxCpe1AUhUXjh/DpA7NYOM6xhH/1gWqWPr3pvIYIKYrC7NmzWbp0KXq97XfR3t7Oyy+/TF5e3nnHLoSnk6Smaz0ElPV+ZVRVyZsR4f1Cohy9ZVpqfSypKZWabkMGBQmvoCh9l6DLsCCv02dYUL301TwfbY1m9m8s1zoMIbyGoigETY4j7sEsTCPD7ftNLQrV/86leW0p6nksr/ZGgX4Gnr5hMnfPHWHfd6Cyhe/+eyO5Jec3dGn8+PHccsstBAYGArbJ6O+88w4bNmyQyejCp0lS07UeA+J7v/bGxcWd4epCeL5gH67UVJyTmqokNbXUZ1CQWZafCw/WZ1jQIe3iEC6RHunUV7NO+mqeL6nWFGLgGcL9ib5tPOGXpWLV9SbPLCrNK4uoeXYPPXUyuMaZTqfw80Vj+OvSCRj1tpYxta1mrntuC5/sPr8PXpKSkli2bBlRTu+3vvrqKz755BMsFhnmJHyTJDVdSFXVFlVVy1VVLQe6dTr5cQvv17enpm8lNaVS0330GRQkSU3hyfoMC5KkprfpU6kpE9D7LSDENtSktcFM/uYKjaMRwvsoOoXgmfGUXmClM8xRFdhV3EzVv3I0jMx9XTMlkddvn054oO3xqavHyv1v5vKvrwrOq7IyMjKS22+/neTkZPu+nJwc3njjDTpldZLwQZJlE0IMqD7Tz+tqfWo5hEw/dx86GRQkvIVzpaYsP/c6zsOC8uvzsVil0uZc3PvMRdz7zEVkLnC8ud+5sghLj1RrCuEK3cFwbLqV0AXJoLNVIapdjr83S4u8/nU2PTWKj+6dyYiYIPu+f3x1iB+9tYvO7v4/3gcGBnLTTTcxYcIE+77CwkJeeOEFGhsbzydkITyOJDWFEAPKPygYQ++Qlm5zJ10d7RpHNHh0itF+WSo1taWYnCs15VNr4cGipVLTm8UGxhIdYPswsKOng+KWYo0j8kzj58Q7qjXrzRyQak0hXGbkF3qavyyGk/TTrPj9VvvQIF8fHnRcclQQ798zk1kjHYUfH+8u5/rnt1DT0v8P3g0GA1deeSVz586176upqWH58uWUlZWdT8hCeBRJagohBpSiKH0noPvQEvS+PTW7NYxEKCbnSk1JagoPFpEC+t7HlpYK6GzSNBwx8MZGOqo1ZQl6/xhNeibNT7Jv71xZjMUi1ZpCCPcQFmDkpVuncsN0x+NUbkkjVzy5kQOVzf2+XUVRmDt3LldeeSXHW921trby0ksvceDAgfOOWwhPIElNIcSAC/bRvprSU9N9OC8/V2X5ufBkegNEOqaoUlugXSzCJZz7asqwoP4bPyce/yBbtWZLXScHt1RqHJEQ3unwIgsJf5rd5+tEfsNDGfbbC056zFcZ9Tp+d8V4fn1Z+vGV+5Q1dnD1U5tYc6D6vG574sSJ3Hzzzfj39pTv6enhrbfeYvPmzT7VCkz4JklqCiEGXEik8wT0Og0jGVzSU9N9OC8/t8ryc+HpnIcFSV9Nr+PcV1MqNfvPz9/ApAWJ9u2dnxdJtaYQGuk62kz9G/mo0t+2D0VRuHXmcF64ZSrBJgMAbV0Wbn9lOy9uOHpeCciUlBSWLVtGRESEfd+qVatYsWKFTEYXXk2SmkKIAXfisCBfoVOkUtNdWF
tb7Jc7cnIxF0h1m/BgzsOCpK+m1xkXNc5++UD9AayqJAH6K2NuAqYgW6KgubaTQ1urNI5ICN8Stni4/XLnwQbq3z6IepLem75u3phY3r07m/jwAMDWnvS3n+7nfz/cR/d5fBgTHR3NsmXLSEx0fMCzfft23nrrLcxmWbkkvJMkNYUQAy4k0keTmn2Wn8sLBy20bd5M8Y03Uf/yK/Z97Vu3UnjZ5RTfeBNtmzdrGJ0Q/RQjSU1vFhcYR6R/JACt3a2UtpRqHJHn8vM3MOliR8+6HZ8XYZVqTSEGTciFCYRc5EiodeytpeH9AklsnsSYIaF8eO9MMpPC7fve2FrCrS9tp6mj/735g4KCuPnmmxk3zvGBWUFBAS+99BLNzY7+nY8++iiPPvpov+9HCHchSU0hxIBzrtRsrffRpKYqlZqDrfHddym5fRntO3ac9Hj7jh2U3L6MxvfeG+TIhDhP0WmOy7L83OsoiiLDggbQhHkJmAJ7qzVrOji0Xao1hRhMoQuSCb5gmH27fUcVTZ8VSm/Hk4gJMfHmD2dw+UTHz2vD4VquemojRbVt/b5do9HI1VdfzaxZs+z7Kisref7556mslH7DwrtIUlMIMeBCfHT5uSKDgjTTtnkzFb/6NVjPUJFjtVLxy19JxabwLFFpQO9UgYaj0COV4N5GhgUNHL8AAxMvdlSK7Vgh1ZpCuJrzwCBFUQj7TiqBk+Psx1s3ltP8VYlW4bk1f6Oef31vEg/Od/TPPlLTxhVPbWRrYf9nE+h0OubPn8/ll19un4ze0tLCiy++yKFDsupDeA9JagohBlxIn0pN3xwUJEnNwVX75FNnTmgeZ7VS+9TTrg1IiIHkFwjhvUka1Qp1R7SNRww4GRY0sCbMS8AvwFat2VTdQcGO85ssLIQ4N4pOIeLqNAIynAodVpfQsr5Mw6jcl6Io/Gh+Gk9cn4nJYEvRNLZ3c+MLW3lnx/m1JMnKyuKGG27AZDIB0NXVxZtvvnneMQvhLiSpKYQYcAEhoegNtjcT5vY2ujraNY5ocHSZa+yX29uP0toqn4IOBnNBwSmXnJ9K+/btMjxIeJY+w4JkCbq3ca7U3FWzi2d3P8sb+W9wuOGwhlF5LlOgkYkXJdi3d6wowio9/YQYVIpOIfK60ZhGOaZxN31WSNs2Wf58KpdNHMZbd8wgOtiWgOy2qPzs3T386fMD5/UYNmLECG6//XbCwsIA+rQCsJ5tUYAQbsqgdQBCCO+jKArBUdE0VdletLTU1xEVH6hxVK5TX7+Ro0X/prFxm31fe/sRtm67lPDwaQxPuY/IyJkaRujd2jZv6fd5prS0M19RCHcQMxoOf2m7XCsJeW9T0lyCXtFjUS2YLWb+vevf9mOT4yZz18S7mDF0hoYRep4JFyWye3UpXZ0WGqvaObyzilFTh2gdlhA+RTHoiLpxLLUv7qOryDakpuGDAhSTnsCJMRpH554ykyL46L6Z3P7ydg5UtgDwzLojHK1t5R/XTSLQ79xTOKcbCPTb3/72nK4vhLuRSk0hhEs4T0BvrfPeJejl5f8ld9cP+iQ0nTU2biN31w8oL39nkCPzHda21kE9TwhNRDt6bcmwIO/yfsH73PXVXVhUy0mP76zayZ1f3skHBR8McmSezT/IyASnKcw7PpNqTSG0oPPTE/2DcRjjg207VKh/+yAdB+q1DcyNxYcH8O7dF3DxmFj7vlV5VVzzzGYqmzo1jEwI9yOVmkIIlwiOjLJfbvHSCej19RvJP/D/gDMt27CSf+B/8PcfJhWbLqALCh7U84TQRIwsP/dGWyq28JvNv8Gqnv55xKpaeXTzowwNHioVm+dg4sWJ7P66lO5OCw2V7RzJqSZtStyZTxRCDCidv4Ho28ZT8+xueqo7wKpS93o+MbeNw5QarnV4binYZOC5m6fwxxX5LN9wFIC88ma+++QGlt88lYyEsLO+rRMrL61Wa58KTZPJxF133UVERARCeBqp1HQhRVFCFEUZpijKMMAo/SqEL+kzLMhLJ6AfLfo3Z05oHmflaNGTrgzHZwVl9+8Nfn/PE0ITzpWatYfPfjCWcGvP7H7mjAnN46yqlWd3P+viiLyLf5CRCXP79tZUpVpTCE3og4zE3J6BPtLftqPHSu3L++kqbdE2MDem1yn873fS+eNVGRh0CgBVzWaueXYTK/dV9Pt2j09DP85sNvP+++9jsZx8xYAQ7kySmq71EFDW+5VRVVWlcThCDJ5gp+XnLV6Y1GxtPXTKJeen0ti4VYYHuYApLY3AKVPO6ZzAqVOln6bwLIGRENj7uNrTAU0l2sYjztvhhsPsrNp5TufsqNohw4PO0aT5SRhNegDqy9s4kltzhjOEEK6iDzMRc/t4dKF+AKhdFmpf2kd3ZZvGkbm366cl8ept0wj1ty207ey2ctfrOTy55nCfoT/9oSi2ZGlpaSnr168/71iFGGyS1HStx4D43q+9cXGy3EX4jpAo715+3tCwaVDPE6cXfe89oDvLpzSdjuh77nZtQEK4gvMS9Br5gMTTba3cOqjn+Sr/YCMZfao1j0q1phAaMkQF2BKbgbYEnbW9h5oX9tJT26FxZO7tgpHRfHDvTFKiHMNX/7rqIA+9sxtzT/8rLOfNm2e/vG7dOkpLS88rTiEGmyQ1XUhV1RZVVctVVS0Huk8s8xbCm/UdFOR9Sc2env4NmenveeL0grKzGfrb35w5sanTMfT/fktQdvbgBCbEQOqzBF36anq61q7+PR/09zxfNmlBIobeas26sjYKd0u1phBaMsYFEX3beJTev0trSzc1y/fS02TWODL3NiImmA/umcn04ZH2fe/nlHHj8q3Ut3X16zZnzZpFUlISAKqq8t5779HZKcOIhOeQLJsQwiWCnXpqttR73/Rzg6F/Q2b6e544s/ClS0l6YTmBU6ee9Hjg1KkkvbCc8KuvHuTIhBggfYYFSaWmpwv269/zQX/P82UBwX5kzIm3b2//THprCqE1v4QQom8ZBwZbSsLSaKZ2+V4srf1LzvmKiCA/Xrt9OtdOcVSgby9q4IonN3K4+uz7kz766KM8+uij6HQ6rrrqKkwmEwCNjY18/vnnAx63EK4iSU0hhEsEhYWj09s+fe1sbaHb7F2f+EVEXDCo54mzE5SdTfJrr5LwlGMokz4yktRPPib5tVelQlN4NudKTVl+7vGmD5k+qOf5uknzkzD42d761B1r5ege71tFIoSnMaWGEXXTWNDb+jr21HRQ++I+rB09Gkfm3vwMOv589QT+Z/EYeltiUlLfzpVPbeKbQ+deiR4eHs53vvMd+/bu3bvZu3fvQIUrhEtJUlMI4RKKTkdwpKOvZquXVWsGB48iPHzaOZ0THj6d4OBRZ76iOG9+w4fbL+tCgmUokPAOJy4/P8/hAEJbIyNGMjlu8jmdMyVuCiMjRrooIu8WGOrH+AudqzWPnveADSHE+QsYHUnkdaOhNznXXd5G7ct5WLtkEvfpKIrCHReO4NkbJxNgtBWStHT2cOvL23ltS/E5315GRgYTJ060b3/66ac0NjYOVLhCuIwkNYUQLtN3A
rp3JTUBhqfcx9k/jOoYnnKvK8MRThTn3pqyxFB4i7AEMAbZLnc0QJtUmnm6uybehU45u+cRnaLjzol3ujgi75Z5STIGo+3nXVvaStFe73ttIoQnCpwQQ8TVjg+gu4qbqXttP2qPVcOoPMMl44bwzl3ZDA3zB8BiVfnlh/t49OM8eizn9vO79NJLiYiIAMBsNvP+++9jtcrvQLg3SWoKIVwmpE+lpve9+Y6MnMnYMb/nzA+lOsaO+QORkTMHIywB3sRi6wABAABJREFU0Nv6AACLfNIvvISiQLRT1bEMC/J4M4bO4NfZvz5jYlOn6Hg0+1FmDJ0xSJF5p8BQP8Y5V2t+KtWaQriLoClDCPtOqn3bXNBI3ZsHUC3yN3om4+PD+OjemUxICLPve3lTEcte3UFLZ/dZ346/vz9XXXUVSu+a9pKSEtavXz/g8QoxkCSpKYRwmT7DgrxwAjrAsGHXkjnpZcLDT97jLDx8OpmTXmbYsGsGOTLf5lypqconzMKbOA8LqpGkpje4Ku0qnl3wLFPippz0+JS4KTy74FmuTLtykCPzTpmXJKHvrdasKWmheJ9UawrhLkJmxRM6P8m+3ZlXR8N7h2Sw11mIDfXn7TuyWZwxxL5v7cEarn56E6X17Wd9O4mJicydO9dxG2vXcuzYsYEMVYgBJUlNIYTLhER6f1ITbBWbk7P+w7SpnzjtVZg+7XMmZ/1HKjS14FypKUlN4U2c+2rufgu2PAPV+drFIwbEjKEzeGnRS30Sm1enXc0Hl3/AS4tekgrNARQUZmLcrGH27e2fFUm1phBuJOTiJIJnOSqq23OqafzkiPydnoUAPz3/vj6L+y9y9F4+VNXKFU9uZGdx/VnfzqxZs0hMTARAVVXee+89zGbzgMcrxEAwaB2AEMJ7PHbdd055bPeXK9j95Yo++x56+1NXhzSogoPHOG2pMhRIS8dHQSKVmsKLFK6Fve84to9ts30BJM+EOQ9D6lwtIhMDJMQvxH55dsJsGQrkIlkLk8lbX46lx0p1UTMl++tJHhd15hOFEC6nKAphS4ajmi20ba8EoG1zBTp/A2ELU7QNzgPodAoPXTKa4dFBPPLeXrosVuraurj+ua38ZekErsiMP+Nt6PV6rrrqKp555hnMZjMNDQ2sWLGCK6+UFQPC/UilphBCDBDlhJ5oqirJNK0o0lNTeJucV+G1K6HmwMmPF2+0Hc95bXDjEgPKoHPUG1is8tjlKkHhJtKdqzWlt6YQbkVRFMKvHEnABKdVX2tKaVkny6DP1lVZCfznh9OJDPIDoMti5cdv7+LvXxzEehbL+SMiIliyZIl9e/fu3ezbt89l8QrRX1KpKYQYMCdWXtaXl/HSg7ZJrWFxQ1j2+HItwhpUiqJHVW1vRFXV8q1Epxgk0lNTeJPCtfDJj+BMH5SoVvjkAQhPlIpND+U8MMgqH4y5VNbCJPI2lGHtUak62kxpfj1J6VKtKYS7UHQKkdeNpq7LSucB29Lpps+PovjrCZ4+VOPoPMOUlEg+uncmt728nYLqVgAe//owR2rbeOyaifgb9ac9f8KECRw+fJg9e/YA8Mknn5CQkEB4eLirQxfirMm7bSGEy/gHB9svm1tbNYxk8CiK48WBVGpqx3lQkPTUFB5v3V/OnNA8TrXCur+6Nh7hMnqn55AetUfDSLxfcIQ/6TOdqzWlt6YQ7kbR64i6YQymVMdU78YPD9O+q1rDqDxLYmQg791zAReOirHv+2xPBdc9t4Xq5s4znr948WJ7EtNsNvPBBx9gldfWwo1IUlMI4TL+QY6kZmd7m49UzDknNeUNqWZk+bnwFtX5tqXl56J4gwwP8lDOSU1Zfu56WQuT0eltPZgrC5s4drBB44iEECdSjHqibknHmND7vkKF+v8epGN/nbaBeZBQfyMv3jKFW7KT7ft2lzZyxZMb2V/efNpz/f39ueqqq1B6+9UXFxezYcMGl8YrxLmQpKYQwmV0zoklVcXc3q5dMIPEuVITfCGJ654UWX4uvEXhusE9T2hKr3M8h8jyc9cLifRn7AWOZazSW1MI96QzGYi+dTyGuEDbDivU/SefzsPyQcTZMuh1/Oa74/ntd8eh19kSlOVNnSx9ZhNf7a867blJSUnMmTPHvr127VqOHZP+psI9SFJTCDFoOltbtA7B5Zx7aB7vrSk04JxQl6Sm8GTmfj5u9vc8oSlZfj74shY5qjUrDjdRdqhR24CEECelDzISc3sG+ih/244elbpX92MuOX2loejr5uwUXvzBVEJMtvEq7V0WfvjaDp7/pvC0H+rMnj2bxMREAKxWK++//z5ms3lQYhbidCSpKYRwqZiUVPvlzjbv76upKI75a5LU1M7xJTIglZrCw5lCBvc8oSnnpKb0LBscoVEBjMnuW60phHBP+lA/W2IzzDbRW+2yUvtiHl0VbRpH5lnmjIrh/XsuIDEyAABVhd+vyOcX7++lq+fkzz16vZ6rrroKPz/bz76+vp7PP/980GIW4lQkqSmEcKkAp2FBUqkpBo301BTeInXOma8zkOcJTTkvP5dKzcEzeVEyuuPLMQsaKTskS1qFcFeGSH+ib89AF2QEQO3sofaFvXTXeH+bq4GUFhfCh/fMZEpyhH3fW9tLueXFbTS2d530nIiICJYsWWLf3rVrF3l5eS6PVYjTkaSmEMKlTM7DgnyhUrPPoCBJpmnGefq5qkqPNOG5YsdC8sxzOyd5lu084XFkUJA2QqMDGJ09xL69/bMi7YIRQpyRMTaQ6NvGo/jbHjOtrd3ULt9HT+OZp3kLh6hgE2/8cDpXZcbb920urOPKpzZRWHPy920TJ04kIyPDvv3JJ5/Q1NTk8liFOBVJagohXMo/2LEEsrPV+5Oa9KnUlKWDWlEUBZyWoEtfTeHR5jzc57HltBQdzPmZa+MRLtNn+bk8hwyqyYtSUHqrNcsONlB+uFHbgIQQp+UXH0z0D8ahGG3Pj5YmM7XL92FpOXmVoTg5k0HPY9dO5GcLR9v3Ha1t48qnNrHpSO1Jz1myZAlhYWEAdHZ28v7770vLFKEZSWq6kKIoIYqiDFMUZRhglD904Yuck5pmX6jUdOqpCVJloylZgi68RepcuOxfZ05sKjq47HHb9YVHkuXn2gmLCWD09Dj7tvTWFML9mVLCiLopHXqHffXUdlD7wj6s7d0aR+ZZFEXh3nkjeeqGLPx7k8RNHd3c/MI23tpW8q3r+/v7c/XVV9t72BcXF7Nx48ZBjVmI4ySp6VoPAWW9XxlVVVUahyPE4PN3Wn7eIT01xSBSnJagy7Ag4fGyboabPrAtLT+Z5Fm241k3DW5cYkBJpaa2Jl/qqNY8dqCBiiOypFIId+c/KoKo68fYMxvdlW3UvpyH1Syvw8/V4oyh/PfObGJDTAD0WFUeeX8vv/9sPxZr31ZOSUlJXHjhhfbtNWvWUFZWNqjxCgGS1HS1x4D43q+9cXFxZ7i6EN7H3+cGBUlPTbfhXKkpSU3hDVLnwq2fwdxHHPsSpsI9W2z7pULT4zlXakpPzcEXHhvIqGlO1ZqfSbWmEJ4gYHw0
EVePsm93lbRQ99p+1G55/XeuJiSE89F9M0kfGmrf9/z6o9z52g7azH1XEFx44YUkJCQAYLVaee+99zCbzYMarxCS1HQhVVVbVFUtV1W1HOjW6eTHLXyPc6Wmbyw/l6Smu1CcempKpabwKqGOhv7EjJahQF5E51Ttb5HnEE1MuTTF3pK5dH89lYVSrSmEJwiaHEf4d0fYt82HG6l78wCqRV4DnquhYQG8c1c2C9IdH/J8lV/N0mc2U97YYd+n1+u56qqr8PPzA6C+vp6VK1cOerzCt0mWTQjhUr42KKhvUlNeRGlKemoKb+XcW1NVT3094XEMTn2ZJampjfC4QNKmOldrFvHkXV/z5F1faxiVEOJsBGcPI3Rhsn27c38dDe8cQrXKc+W5CjIZePbGydw5J9W+L7+ime8+uZFdpY32fZGRkSxevNi+nZuby/79+wczVOHjJKkphHApk1OlZqcvVGrinNSUIQ9akp6awmv1SWrK/21vIsvP3cOUxSnQW61ZklenaSxCiHMTMjeR4DkJ9u32XTU0fnQYVT4EPGc6ncIvLh3LX66egKG333BNi5nrnt3Mp3vK7debOHEi48ePt29//PHHNDVJlbsYHJLUFEK4VECfSk3v76npnGxQkWSDppxbfkhSU3gTSWp6LedBQVKpqZ2IIUGkTZFe+EJ4IkVRCFuUQtD0IfZ9bVsraV5ZpF1QHu7aqYm8dvt0wgONAJh7rNz3n1weX12AqqooisKSJUsICwsDoLOzkw8++ACrvP4Wg0CSmkIIl/K5Sk2nN6TIG1JtOS0/V2X5ufAmktT0WpLUdB/O1ZpCCM+iKArh3x1J4KQY+76WdcdoXlOqYVSeLXtEFB/cM5PU6CD7vr9/eYgH395FZ7eFgIAArrrqKntP+6KiIjZt2qRVuMKHSFJTCOFSfgEB9mXAPWYzPd3dGkfkWtJT030oUqkpvJZTpkUeZ7xKn0FBsvxcU5FDgxg5OVbrMIQQ/aToFCKuGYV/epR9X/OqIlo3l5/mLHE6w6OD+OCemVwwwvEz/XBXOd9/fgu1rWaSk5OZPXu2/djXX39Nebn8vIVrGc58FSGE6D9FUfAPCqajpRmwTUA3hEdoHJXr9E1qSk9NTTkPCpKkpvAmiiQ1vZVBJ4OCtHa6gUAnO3bvMxe5MhwhxHlQ9Dqirh9D7St5mA83AtD40REUPz1Bk6XFRH+EBRp55bZp/OqjPN7cVgJATkkjVzy5kRdumcqcOXM4cuQIZWVlWK1W3nvvPe688077hHQhBppUagohXM7fh/pqKk4Pq6q8IdWU4pT4kUFBwqvI8nOv1adSU55DhBDivClGHVE3peOX5Hg/0vDuITr21WoYlWcz6nX84crx/PI76fbPWY81dHD105vYeKSeq6++2p7ErKurY+XKlRpGK7ydVGoKIVzO37mvZqt399WU5eduxLlSU3pqCm8iSU2v1aenpiw/18SJlZeHtlfy5Qv7AYhNCeWaR6ZoEZYQ4jzoTHqifzCOmuf20l3ZBirUvXmA6B+Mwz/Ne1eQuZKiKNw+azgpUYE88GYubV0WWs093PnaTj68dyaXXnopH330EQA5OTmkpaUxduxYjaMW3kgqNYUQLucf7DwsyMsrNZ2TmsgbUi0599RULZL4EV6kT1JT1S4OMeBk+bn7SXLqx1dd3Ex7c5eG0Qgh+ksXaCT69vEYogNsOywqda/ux1zUBMCxR9Zz7JH1GkbomS4eG8e7d1/AsDB/ADq6Ldz1+k5Sx4xj3Lhx9ut9/PHHNDc3axWm8GKS1BRCuFzf5efeXalJn0pNeUOqKZ1UswkvJZWaXkuWn7sf/yCjY0OFkrw67YIRQpwXfYgf0cvGow8zAaB2W6l9OY+uMi9/f+JiY4eG8spt0wj0s70POlrbxs/e2cOSJUsIDQ0FoKOjgw8++ACrtIQSA0ySmkIIlzP56PJz5A2ptvTOlZryuxBeRJKaXkuvk+Xn7q5oryQ1hfBkhnB/opeNRxds+8BC7bRQ++I+jaPyfGlxIfz56gn27S/2V/HKtgquuuoq+76jR4+yefNmLcITXkySmkIIl+tTqdnm7UlN50FBkmzQkqKT6efCS0lS02v16akpH4y5pdL9dVikpYkQHs0YE0j07Rko/raWH9a2bo0j8g6XTRzGbTOH27f/uuoAZT3BzJ49275v9erVVFRUaBGe8FKS1BRCuFzfQUHe3lPT0Q9Nlp9rTC9JTeGlJKnptSSp6Z7ueXoewRG25apdnRYqDzdpHJEQ4nz5DQ0i+rZxKH59UyIW6Zt7Xn6xeAxTU2zDl6wqPPBmLqMzpzNs2DDbPquV9977/+zdd3xT9foH8M83SXdLS4GWvTcqS5agskSGIMjPeUVAhnvc671XEb0CjuvCq3hBlAIFx1Vko4AosofIFKQyZK8ySvdM8/39kTY9p01H5klOPu/XK68mZ+R807TJyZPn+T5LkJ/P3zO5B4OaRORxykZBeXrP1IQyU5MfSLUkhLBdZ6Mg0hUGNXVL1SiI5ec+QwiBRjfWtN0+dYgl6ET+rLgp0JVZByDz1e+jF9/6xbaezYMcF2Q0YOZDnVAryvpF0LWsfDz9vwMYOnwEgoKsJf9Xr17FDz/8oOUwSUcY1CQij1N1P9d9piYbBfkMVaYmnwvSEUXAnkFNfVE2CrLwufUpjW8o6YJ++uBVDUdCROTb4qqFYuZDnWA0WM9X9p9NxaztyRg8eLBtmz179uCPP/7QaoikI6bKNyEick1oRODMqanMoJJgIE1LwqBsFMTgAOkIMzV1S1l+bpZmDUdCpdVrXR3GIAMKCyy4fikb6VdzUK1mmNbDIiIn1H/71jLLlBmZprgwxD/XGcIoymxHVdO1SSwmDWqNN75PAgB8vvM0OjS4CW3btsXhw4cBACtWrEDdunVtHdKJnMFMTSLyOHWmpr6Dmso5NRls0JgyU5PPBemJKqgptRsHuZ1qTk1mmPuUoGAj6rWsbrvNLuhE+mW+nIOsX9nMxlXjejXBkJvq2G5PXn4ILbvcbgti5uTkYPny5bBw7ntyAYOaRORxgdr93MIsG01Z8nJt19PXrEHesWMajobIjZipqVtGQ0lQk+XnvqfxjYoS9EMsQSfSs/Qfz8CSy3N5Vwgh8M7Im9A8zprgkltgwXOLD2PAkGG2bU6cOIGdO3dqNUTSAQY1icjjQkp1P5c6zixSzqnJYIM2snbswOmHRyHv98O2Zde/+BInhg7D6YdHIWvHDg1HR+QGDGrqFsvPfVsjxbya54+koiCP2bREemOMsTa4sWQVIGPjOY1H4/8iQ0yY/XBnRARb399OX8vGhzuvo2fPnrZt1q9fj4sXmRlLzmFQk4g8zhQUBFOI9QRBWiwoyM3ReESeI8BGQVpKXbwYZ8aNR/bu3XbXZ+/ejTPjxiN1yRIvj4zIjRjU1C1lUJPleL6nWs0wVK8TAQAoNFtw7sh1jUdERO4WPbCx7XrG1vMwp+aWvzFVSfO4SLx3b3vb7Z+SLuOIoSHq1LGWphcWFmLJkiXIz8/HlClTMGXKFI1GSv6IQU0i8or
QiMCYV1PV/Rz8QOpNWTt24OK/XgMqCwRYLLj46r+YsUn+i0FN3VKWnxfyizGfxC7oRPpU/+1bUf/tWxF2Uy0E1S/63GK2IH3tKU3HpReDb6yDCbc2sd2e/tNxNL65L4KCggAAV69exbp167QaHvkxBjWJyCsCZV5NVVCTpYNedXXmrMoDmsUsFlyd9YlnB0TkKQxq6paqURCDmj6pkWpezWu6nlKHKBAJg0DMkKa229n7ryD/bIaGI9KPFwe2RrcmsQCsfQ5f+v4kut/ez7Z+dzmVVkQVYVCTiLwitNS8mrqlCDZIBhu8Ju/YsXJLzsuT/euvbB5E/ondz3WL3c99X+1m0QgOMwEAMq/n4dr5LI1HRETuFtIkGqHtSr7ASP3+BL/AcAOT0YCPH+qIuCjrtGTXswswY18BWrVurfHIyJ8xqElEXhEaGRhBzYKCNNv11NRfkZl5VMPRBI6sHc51TXR2PyJNCVFynV+e6ArLz32f0WhAw7axttvsgk6kT9GDmgAG6/tt/ql05B6+pvGI9CEuKhSz/tIJpqLf7YHz6ThobIGoqCjVdpxXmqrKpPUAiCgw6L38PCVlG06e+i9SU3fZll27tgHXrm1ATExXNGn8NGJje1ZwD+QKi5N/U87uR6Qplp/rFsvP/UOjG2vg+J7LAIDTB6+hs6KxCBHpQ1DNMET2qIPMbRcAAGlrTiG0VSyEiXlhrrq5cSwmD2mD0z8usC74HSid8jJt2rQy+7GBENnD/0gi8ooQHTcKunBhEfbtH6MKaCqlpu7Cvv1jcOHCt14eWeAwKP6+vLEfkaZYfq5bqkxNlp/7rEbtagBFCdOXTqQhN7NA2wERkUdE9W0IEWrNAzNfzUHmLxc1HpF+jLmlsdZDIJ1gpiYReUWYTjM1U1K2IemPyUClnc4tSPrjZYSG1mXGpgdE9Oju1f2INMVMTd1ipqZ/CIsKRnzjakg+mQ4pgTOHr6Fl19paD4uI3MwYEYRqfRsgbfVJAEDG+jOI6BgHQ3iQxiPzf0II/GPSKxgxaxuOJls/GzarbsStOSVTQz355JOIi4vTaojkJ5ipSUReEaLTRkEnT/0XlQc0i1lw8tRMTw4nYIW0aIHwm292aJ/wLl0Q0qKFh0ZE5EEMauoWg5r+o9ENJU1ETh3kXHtEehV5S10YY0MBAJZsM9I3nNV4RPoREWLC7Ic7IzLEmmv353X1+97evXu1GBb5GQY1PUgIESWEqCuEqAsgiJPdUiBTNgrK00n5eWbm0XJLzsuTmvoLmwd5SM2nngQMVXxbMxhQ88knPDsgIk9hUFO3DIrnluXnvq3xjTVt188cvgaLhVNBEOmRMBkQrZg3N3P7BZiv5Wg3IJ1pWisS79/b3u66/fv3o6CA03tQxRjU9KwXAJwvutyYnJys8XCItBOqzNTM0kem5vXr2726H1UsokcP1Jk2tfLApsGAOq9PQ0SPHt4ZGJG7MaipWyZDycxQzNT0bTUbRCI8OhgAkJdlRvKJNI1HRESeEnZjTQQ3LJpKq1Ai7YdTmo5HbwbeUBuP3d60zPLc3FwkJSVpMCLyJwxqetZ0APWKLgfj4+M1Hg6RdlTdzzOzNByJ+5jNzmWcOrsfVS7m//4PDecmILxLF7vrw7t0QcO5CYgZOdLLIyNyIwY1dYvl5/5DCKEuQT/EEnQivRJCIHpISdAt57eryDudruGI9OcfA1qhR9MaZZbv2bNHg9GQP2FQ04OklBlSygtSygsACgxVLYsk0iE9ZmqaTM51znZ2P6qaiB490OjzhQi/5RbbsugRI9B01Uo0+nwhMzTJ/wlRcp1BTV1Rlp9b+Nz6vMY3lJSgn+a8mkS6FtKoGsJuKvmfT/v+BKTktBPuYjIa8PFDHbE2+FZ8k9seFmk91zl9+jSuXLmi8ejIlzHKRkReoc7U1EemYvXqt1S+kRv3I8cYo0r+5iJvv41NgUg/mKmpW8ryc7PFrOFIqCrqt6kOg9H6wfva+UxkpORqPCIi8qTogU2Aov/5/DMZyDl4VeMR6UvNyBDM/EsnmI0hOGOJsS1ntiZVhEFNIvKKkPBwW3ZRfk42LIX+X1YXGdkSMTFdHdonJqYbIiNbemhERBQQVEFNZonoiapREMvPfV5wqAl1W8TYbp9mCTqRrpliQxHZs67tdtraU5BmfrnoTp0bVcerd7XF0cJatmW/7tnHhkFULgY1icgrhMGA0PAI2+3cLH1kazZp/DSq/lJqQJPGT3lyOFQeBn5IT5ipqVvKOTUt0sLSRj+g7ILOoCaR/lXr0xCGcGtWfWFKLjK3X9B4RPozqnsjdL2xNTKltRlbYUEe1m3bq/GoyFcxqElEXhMSqZhXUycl6LGxPdGm9Zuo/OXUgDat30JsbE9vDIsA9byDRHrCoKZuCSHYLMjPNLqxpLHFuT9SYC7gc0akZ4YwE6L6NbTdTv/5DAqzmEXoTkIIvDXyJqSE1bctW7dpO65n5Ws4KvJVDGoSkdeERpTMcZink0xNAKhb9z507JCImJhudtfHxHRDxw6JqFv3Xi+PjIh0iUFNXWOzIP8SExeOmPhwAIA534LzR1O1HRAReVxktzow1QwDAMjcQmSsP6PxiPQnPNiEFx68E5aigoXqMg1//2IrCi2sYCA1BjWJyGtCVZma+uiAXiw2tic6d/oKDRs+ZlsWHd0V3bquQedOXzFDU2ss4SQ9YVBT19gsyP80uqEkW5Nd0In0T5gMiB7U2HY7c+dFFFzJ1m5AOtWuUTxq1m9iu5129gg+Wn9MwxGRL2JQk4i8JjRCEdTUUaamUkhIyaTWUVGt2RRIS6w+J71iUFPX2CzI/yhL0E8fusq5UIkCQGjbGghuUs16wyKRtuaUpuPRq0G9b7Fdb268hpnrj2DDH5c1HBH5GlPlmxAROWf6/XeVu271x+9j9cfvq5a98M13nh6SxwlG0nwSP2CSrjCoqWulmwWR76vbPAZBoUYU5BYi/Wourl/KRmydiMp3JCK/JYRAzJCmuPzf/QCA3MPXkHciFSFNYzQdl940a9YM0dHRSEtLQ6gwo6HhOp77eh++e+ZWNKwRrvXwyAcwU5OIyGMYSNOSYKMg0ivl3zaDXrrD8nP/YzQZ0KBNrO02S9CJAkNw/SiEdyip0kr9/iQk53x0K4PBgM6dO9tutzJdQXquGY9/sQe5bMxGYKYmEXlQ6czLHYv/h+3ffgkA6H7P/eh5/ygthuVhDKQRkYepMjX54Ulv2CjIPzW6oQZO7LsCwFqC3nFAw0r2ICI9qDawMbIPXQPMFhScz0TOgSsI7xin9bB0pUOHDtiwYQOklKhtyEA1kYPDF4FXlh/Ce/93ExMZAhwzNYnIa0zBwbbrBfn5Go6EAg7jPqQnLD/XNWX5OefU9B/KZkEXj6chL4dZtkSBwBQTiqhe9Wy309aegmQGoVtVq1YNrVq1st1uabR+gbR4zzn8b9dZrYZFPoJBTSLyGmVQ0xwIQU0G0jTGb21JpxjU1DVlUJPl5/4jIjoEtRpGAQAsFomzh1M0HhEReUtU7/owRAYBAArT8pCx9YLGI9IfZQ
l625DrMMJ6/jNl5e84cDZVo1GRL2BQk4i8xhQcYrtuzs/TcCQexPIH38QSXdITBjV1zWhgoyB/peqCfvCqhiMhIm8yhJpQrX8j2+2MjWdRmBkACRxeVNwwCAAMhfnoUSMXAJBfaMETX+xBShZ/34GKQU0i8pqAy9QkbTHATHrFoKauqTI1JTM1/UnjG2rarp/+/RobhhAFkIgutWGKCwMAyLxCpP94WuMR6YvBYECnTp1st3vFpCMq1Noi5kJaLp77eh8K+ZobkBjUJCKvCbSgpmT9ORF5AoOauqYMalosfH79SVyjKIRFWUtQczIKcPl0hsYjIiJvEUaB6MFNbbezfr2EguQsDUekPx07drQ1Bbp0/izeGtzYtm7Lsav4z49HNRoZaYlBTSLymoAoP+c8jj6KAWbSEQY1dU1Zfs5GQf5FGAQatSspQT91iCXoRIEktFV1hDSPsd6wAGlrTmk5HN2pVq0aWrZsabsdfP0Ununb3Hb7vxuO46fDyVoMjTTEoCYReU2gZWoykKYxlp+TXjGoqWvsfu7fGt2oKEE/eE3DkRCRtwkhED24iS3HIfePFOQev67toHRG2TBo//79eLp3U9zaouR196+L9uPUVWbIBhIGNYnIawIhqCmYqemb2CiI9EQVsJf8+9YZVVDTwqCmv2nQNhYGg/V/9MqZDGSl6bUyhYjsCa4bifBO8bbbad+f5Py6btS8eXNbw6CcnBwcPfIHZjzQEfVirPOZZuSa8fgXe5CTz/fPQMGgJhF5TWCUn5PPYKYm6ZUQUE11waCmrhgMJafnzNT0PyFhJtRpHm27ffoQszWJAk30gEYQQdbX8oKLWcjey5JodyndMGjPnj2oHhGMWX/phGCj9Xf+x6UMTF52EJLnRwGBQU0i8hpVpmaBPjM11fhGSkQewhJ03TIJk+06g5r+qZGyCzqDmkQBxxgdgsjb6ttup/1wGhZmDrqNsmHQqVOncPXqVbRvEIOpd7ezbbN033l88csZrYZIXsSgJhF5TSCUn7NRkI/iN7WkNwxq6paqURDLz/1SoxtLmgWd2HcFMx//WcPREJEWom6rD0NUEADAkpGPzM3nNB6RfpRuGLR3714AwANdGuDeziXB5Gmrfse+M5zTVO8Y1CQir2H5OXkV48ukZwxq6pZBsPzc31WvHY5qNUO1HgYRacgQYkT0gMa22xmbz6EwXa9JHd5XumGQ2WyGEAKvD78B7epWAwAUFEo8+eVeXMvk5049Y1CTiLwmMDI1FZgd6DM4pw7pDoOausXyc/8nhFCVoBNRYArvHI+g2uEAAJlvQdq6U9oOSEeaN2+OatWswcvs7Gz88ccfAIDQICNmP9wZ0WHWLNmLabl45n/7YC7kuZJeMahJRF5jCioJahYWFEBadPjmwuY0PkPwuSA9Y1BTt1SZmiw/91vKEnQiCkzCIBA9uKntdvaeZORfzNJwRPpRumHQ7t27bdcbxIbjwwc62D6Wbf/zGqb/eNTbQyQvYVCTiLxGCKEKbAZGsyAiIg9gUFO3VHNqMlPTb9VrGQNTcMn/aerlbA1HQ0RaCW1ZHSEtq1tvSCBt9QlWELmJvYZBxfq0isOzfVvYbn+y8U/88Pslr4+RPM9U+SZERO5jCg62BTPN+fkICtHvnFOS3c99B58K0hsGNXWL5ef+q6KGQF/+a2eZZU/N7uvJ4RCRj4gZ0gTJx64DEsg7lorco9cR1ipW62H5vejoaLRs2RJHjhwBYG0YNGDAANv65/q1wP6zqdh09AoA4O+LDqDlM1FoUjNCk/GSZ3g8U1MIUUsIMVwIMUIIUdvTxyMi3xZw82qShlh+TjqmnF6BQU1dYaMgIiJ9CYqPQESXklBI2vcnIQv5jbs72GsYVMxgEPjogQ6oXz0MAJCRZ8bjn+9Bdr65zP2Q/3I5U1MI0QHAPQCWSin3l1o3DsDHAIpbHpuFEP+UUn7k6nGJyD8FVgd0nqz4DJb5kN6oMjX5960nqvJzzqnpV0pnXl44nopl7+8FANRqGIX7Xu6ixbCIyAdUu6MRsvdfgcwvhPlyNrJ2X0JktzpaD8vvFTcMSk9PtzUMuuGGG2zrY8KDMfvhzrjnk+3IN1twJDkDk5YexIf3d+D8+zrhjkzNhwFMAnBRuVAIcROA2QBCAWwDsBZAIYAPhBC3uOG4ROSH9J6pKZgd6Dt4okJ6xvJz3TIKzqmpFzFx4bbrqcnZnEePKIAZo4IRdXt92+30H0/DkseMQVeVbhi0Z8+eMtvcUC8ab9xdEuhcsf8Cfv7jslfGR57njqBmTwB7pJTJpZY/DcAI4HUp5W1SyiEAiic4eNwNxyUiP6T3oCYRkVcwqKlbqqAmMzX9WlhUkO16QV4hstN53kMUyCJvrQdjNetnIUtmATI2nsO5l7bg3EtbNB6Zf1M2DDp58iSuXbtWZpv7ujTAvZ1LgsqfbT7htfGRZ7kjqNkAwB92lvcHkAvg7eIFUsqtAH4G0MMNxyUiPxRQ5efMyPAdfC5IbxjU1C12P9eP0qWNaeyAThTQDMFGVLuzse12xpbz2g1GR6Kjo9GiRUmn871799rd7m8DWsJksL4u/3IyBb+dS/XG8MjD3BHUrAlA9d8ohIgF0BjAL1LKnFLb/w6gnhuOS0R+SP+Zmix59hksPyc9Y1BTt5SZmhY+t7qSmlz6YxERBZrwjnEIqhdpvWHma7y7KBsG7du3T9UwqFid6DAMbV/XdnvOlpNeGRt5ljuCmgUAYkst61j0016IPAvsnkEUsPQf1CTfxLcd0hkGNXVLGdQ0Wzjfmr/rclcT2/VUZmoSBTxhEIge3KTyDckhxQ2DANgaBtkz/taS3/3qgxdx7jpfl/2dO4Kax2EtNVcaAOsnyJ12tq8D4JIbjktEfiigys9JW8zUJD1jUFO3WH6uLzFxYbbrqcn88ExEQGizGIS2UeeFsZGYa4xGY6UNgwCgXd1o3NKsBgCg0CIxf9spbwyPPMjkhvtYBeAVIcRnAP4LoCWsjYByAfxgZ/vuAJjnSxSglJmaBXrM1GQgjYi8Qflaw6CmrrD8XF9i4ks6oKddYfk5USCrqCHQ+Ulbyyyr//atnhyO7nTs2BGbNm2ClNLWMKhGjRpltptwW1Ns/9PaTOjrXWfwbL8WiA4LKrMd+Qd3ZGpOB3AawDgA+wB8AyAKwH+klOnKDYUQbQG0hrVZEBEFoEAqP5csefYd/Pab9IaZmrrF8nN9iY5TBDUv58Bi4fsREZEnVLVhUO+WtdAizjqvaVZ+Ib7edcYr4yPPcDlTU0qZJoS4BcDrsHY1vwbgGynlTDubDwZwAMD3rh6XiPyT3svPBRsF+Q4+FaRnqqAmgyR6oiw/Z6am/wsJMyEsKgg5GQUoNFuQmZKLajXDKt+RiHSndOal+VoOLr2323rDZEDdf3WHIdhoZ0+qqs6dO+Po0aMArA2D+vTpA5NJHfYSQmDCrU3xzyW/AQDmbzuFsT2bINjkjpw/8ja3PGtSyotSyvFSynZSytvKCWhCSvm+lLKjlPKAO45LRP4nkDI1yXdwniLSHWZq6pYqU1MyU
1MPVCXol1mCTkRWphqKLzjMFuQdT9VsLHrRvHlzREVFAbA2DDpy5Ijd7e7uWBc1I63JNpfSc/H9wQteGyO5l1tD0UKIGkKI0p3QiYhsTEGKoGaB3oOaDKRpivObkp4xqKlbnFNTf5Ql6OyATkTlyTl8Tesh+L2qNgwKMRkx5pZGtttzNp9kEoSfcktQUwhxtxDiOIDLAK4IIY4KIYa5476JSF/0Xn7Ommci8go2CtItVfdzC7uf6wE7oBNRVeT+kQLJeXdd1rFjR4ii86QTJ07g2jX7weK/dGuE0CBrSOzwxXRb8yDyLy4HNYUQ3QEsAdAU1k/zAkBzAEuK1hER2bD8nDTBb15Jb5ipqVsGxXPL8nN9UJafp7L8nIjKYcksQP65DK2H4fdiYmLQvHlz2+2PP/4YU6ZMKbNd9Yhg3HdzA9vtOVtOeGN45GbuyNT8W9H9TAMQD6A2gDcAGIvWERHZBFRQk4E0TQmWn5OeMaipWyZR0tDAYuFzqwcxLD8nonLUf/tWhHeOt93OTUrRcDT60blz5ypt92jPJrbil41HruBoMoPK/sYdQc0eALZIKadIKa9IKS9LKf8FYAuAW9xw/0SkIyw/J00wvkx6w/Jz3VKVn0uWn+tBdK2S8vOMqzkoNPN/lohKhLUpaUuSm8QSaHdo0aKFrWFQRRrXjMCdbWvbbicwW9PvuCOoGQdgp53lvwCo5Yb7JyIdCahMTdIYA8ykY8zU1C1l+TmDmvpgCjYiMtb6pa6UQPpVlqATUYmQFtUBk/W8teBSNswpuRqPyP+VbhhUkQm3NbFdX77vAi5n8PfvT9wR1AwCkGVneRYAk53lPk8I8ZQQ4jchRHrRZYcQYojW4yLSA1X3c90HNZkeSEQeogpq8rVGT5Tl52wUpB/qEnQGNYmohCHEiNBmMbbbOczWdIuOHTuqbqek2C/t79woFh0bxgAA8gstWLj9tKeHRm7kl0FHLzgH4EUAx2AN/I4GsFwI0VlK+ZumIyPyc+pMTR2WnzM50Dcx6EN6w0xN3TIYmKmpRzFx4Tj3x3UA7IBORGWFtqmB3CPW14jcpBRE9ayn8Yj8k72GQMVmzJhR7vYTb22KJ77cCwD4fOdpPNmnGcKDGS7zB+56lsYIIXqXWtYYAIQQP9vZXkop+7np2G4npVxRatFkIcQTsM4fyqAmkQsCqfxcMlNTW2wURHrGoKZuqTI1GdTUjei4knk109gsiIhKCW0TCyy3Xs87kQZLrhmGUAbVvGVAu9poGBuOMynZSMspwOI95/BIj8ZaD4uqwF3/JY2LLvb0trPMpU/6Qoj/A3A7gA4A2gOIAvCllPLhCvapD2uH9oEAagC4COvLxlQp5fUK9jMCuBdAJIDtroybiEo3CtJfUFMwVdNHMcBMOsOgpm6pGgWx/Fw3YuLZAZ2IymeKDkFQvUgUnM8ELBK5R68j/Ca2KHFU6UzNvLw8/Pvf/wYACCEwadIkBCuSbIoZDQLjejXBayt/BwAkbDmJv3RrBKOBn+18nTuCmn3ccB+OegXWYGYmrKXirSvaWAjRDNaAZByAFQD+ANAVwHMABgohekopr5Xa50YAOwCEFh1nhJTyoJsfB1HACaRMTdIYMzVJzxjU1C02CtIn5ZyaaZxTk4jsCGsTaw1qAsg9fI1BTTcICSlJqJFS4uLFi2jUqJHdbe+9uT4++PEo0nIKcCYlGz8evoSBN9Tx1lDJSS4HNaWUm9wxEAf9FdZg5nFYMzY3VLL9LFgDms9KKT8uXiiE+KDovt4E8HipfY7AmgkaA2AkgAVCiN5SykNuGD9RwNL9nJoqzA4kIg9hUFO3WH6uT1E1QyEMAtIikXk9DwV5hQgKMVa+IxEFjNA2NZD+0xkAQM6R65CFEsLIL+nd6cKFC+UGNcODTXi4e0PM3PAnAOCzzScY1PQD7uh+riKE6CqE+LsQ4kMhxGwhxOtCiOFCiFB3HUNKuUFKeUzKyjs/CCGaAhgA4BSAmaVWvwZrl/ZRQoiIUsfIl1Iel1LullJOArAf1gAoEblA7+Xn7BTko9goiPSGQU3dUmVqsvxcN4xGA6rVLPk4lHaFJehEpBZUNwLGaGsCiMwxI/90msYj0p8LFy5UuH50j8YINlrfh/eeScWe0+XOVEg+wm0zzwohugD4FNaycKDkk33xJ8kUIcQ/pZTzS+1nklKa3TUOO/oW/VwnpfqsX0qZIYTYBmvQszuA9RXcjwFASAXrAQBCiD3lrGqdkZGBjRs3Vj5iP5ORkQEAunxs5H6ysOQDWkF+HjZs2AChozJhi/zDdj35UjKuXN7osWPxf69iURcvoLjY7+jRo8jh74ncSOv/v/apaahedP3A/n24foaBe71Iyk6yXU++kszX+FK0/t9zRaGp5KPItg27Ed1AP+c/FBj8+f/PX9SqJhCdZg2qHV13ANda8/3dnY4fP17p32+32gZsOW99vf730l/wdEe35ec5Te//e8WPzxluydQUQgwBsAXWcu2LAP4H4B0A7xZdvwhrc54EIcRbiv0GAHjeHWOoQKuin0fLWX+s6GfL4gVCiLeFELcKIRoLIW4UQvwb1oZHX3pumESBQRiNJXMdSglp0XOGEU9CfAafCtIZqfoyiH/gemJQnJ5boOf3yMATHFlyPT9du3EQke/Kiit5T4+4LPgW7wa33nqrLYkmJycHBQUFFW5/Z+Mg2/U9yYW4nM33Yl/mcqamECIe1sClBcATABKkVE8AJIQwABgH4EMALwohfoS1Y/k3AP7t6hgqEV30s7zc7eLlMYpltQF8UfQzDcBvAAZJKX+o7GBSys72lgsh9kRFRXXq3bt3FYbsX4q/LdDjYyPP+G3+TBTkWifJ79mjO0LCIyrZw39cvHgdh4uSbOLja6Ndu94eOxb/9yp2cdMmpBZdb9GyBWL5eyI30vz/72wNoKgiqv2NNwItNBoHuZ3xnNFWOxQTG8PX+FI0/99zwSFxDpuOWvMsYiPi0bt3W41HROQYf/7/8xeywIILB3dA5lsQnC3Qs11XBCkajZFzjh8/josXLwIAmjVrhqZNm1a4/Y9XdmHT0SuQAH4viMN9vW/wwijLp/f/vaioKKf3dUem5l8BRAL4i5Ty09IBTQCQUlqklHMA/AXWsvTPAXwLIAXAMjeMwRWly+QhpRwjpWwkpQyRUsZJKftXJaBJRFUTOB3Q+dUqEXkI59TULaOhpHkM59TUl+j4ksBEKjugE5EdIsiA0BbVbbdzk65pOBr9qFu3ru36+fPnK91+wq0lQc9Fu88hNVvPn1n9mzuCmgMB/CKlrDQ4KaVcDuAXAHUBJAHoJqU86IYxVKQ4EzO6nPXVSm1HRB6m6w7oOpofVFfYKIj0RhnU5BcoumIUiqAmu5/rSkycMqjJRkFEZF9omxq26zlJKRqORD+UQc3KmgUBQM/mNdC6tjV7MKegEF/+csZjYyPXuCOo2QTAdge23w7r2XdPKeU5Nxy/MkeKfrYsZ32Lop/lzblJRG6m/w7o5Av01ICKqAxmauqWKqjJTE1diYwJgTHI+r+bm1mA3KyK53UjosAU
2rq6rZ40/3Q6CjP5eclV9erVs12vSlBTCIGJt5VkayZuP4U8M9+TfZE7gppBABz5LysAkCelzHLDsatiQ9HPAUVze9oIIaIA9ASQA2Cnl8ZDFPACpfxcMnvKd/CpIL1hUFO3VOXnzNTUFWEQiK4VZrudxhJ0IrLDGBmM4IZFBaUSyD1yXdsB6UCtWrVgMllbyqSlpSEzM7PSfe66qS7iq1mTca5k5GHF/sqDoeR97ghqXgRwowPbtyvaxyuklH8CWAegMYCnSq2eCiACwEIvBlmJAp4yqFmgs/JzAWYH+g4+F6RjDGrqljJT08LnVndi4lmCTkSVC20Ta7vOeTVdZzQaUbt2bdvtqmRrBpsMGNuzie12wpYTkJzSyue4I6i5GcAdQojWlW0ohGgD4E4Am1w5oBBiuBAiUQiRCOClosU9ipcJId4vtcuTAC4DmCGEWC6E+LcQ4mdYmxwdBTDZlfFUMM4oIURdIURdAEEWC09MiQAgKEAyNYmIPEY5vQIDX7qizNQ0W8wajoQ8gfNqElFVhLUtmVcz9+h1yAK+17vK0RJ0AHiwa0NEBFvfl48mZ2LT0SseGRs5zx1Bzf/CWoL+nRCibXkbFQU0VwEwApjp4jE7ABhddLmzaFlTxbL/U25clK15M4BEAN0AvACgGYAZAHpIKT311ccLAM4XXW5MTk720GGI/EvgzKnJb/J8Br9VJb1hpqZusVGQvkXHKcrPkxnUJCL7TLXCYKwRCgCQ+RbknUjVdkA64GgHdACIDgvC/V0a2m7P2XLC7eMi15hcvQMp5R4hxHsA/gFgrxBiKYD1AM7C+om+IYD+AEYACAbwrpRyj4vHnAJgioP7nAUw1pXjOmE6gM+Krq+Nj493pEyfSDem339XuetWvPd6mWUvfPOdJ4fjYSx59hlsFER6xqCmbrH8XN/U5eecU5OI7BNCIKxNDWRutQbfcpJSENoqtpK9qCKlO6BLKavUWHRsz8ZI3H4SFglsO34Nv19IQ7u60Z4cKjnAHZmakFK+CGuQ0QDgAVgDeWsArC26fn/RuilSypfKuRvdkVJmSCkvSCkvACgwGNzy6yYiIkcxU5P0RhXU5N+3niiDmiw/15/S5eecn42IyqOeVzOFrxcuqlGjBoKLpkHLyspCenp6lfZrEBuOwTfWsd1O2HLSI+Mj57icqVlMSjlNCLEAwKOwdhSvA2vK0kUAWwHMk1KedtfxiMh/lM68/GH2Rzi04UcAwB0Tn8FN/e60t5v/44mHtpipSXrGTE3dUs6pyUxN/QmLCkJwqBH5uYUoyC1Edno+IqJDKt+RiAJOSONqEKEmyFwzCtPyUHAxC8F1I7Uelt8yGAyoW7cuTp06BcBagh4dXbWMywm3NsV3v1n7Xa86cAH/HNgKdaLDKtmLvMGtqYNSytNSyteklP2llO2klG2llP2KljGgSUQAAIPiA5u0cL4wIiKHMaipW5xTU9+EEKoS9DQ2CyKicgijAaGtq9tu5x5mF3RXlS5Br6r2DWLQtYk1c9ZskUjcdsrdQyMnsR6aiLzOYFJ8YDPr9wObZKMgH8LngnSGQU3dYlBT/6LjOK8mEVVNmKIEPScpRcOR6IMzHdCLTbi1qe36V7vOIDOPU8T4AgY1icjrDMaSmS8shTp7M2DJs+/gc0F6xqCmbinLzwtZzaBLMYoO6KnsgE5EFQhtGQsYrOe0BeczUZiWp/GI/Ju9ZkFV1a91HJrWjAAAZOSa8c2vZ90+PnIcg5pE5HUGo2K+sEJ+YCMv4PympDe5aSXX//wZuJyk3VjIrQyKgDUzNfVJ1QGdQU0iqoAhzISQpiXzPub8wWxNV8TExCAszPrFUm5uLlJSqv77NBgExt3axHZ73taTMBfyi2WtMajpQUKIKCFEXSFEXQBBFgv/4ImAQApqMpCmKSZqkh6d2AjMHwz8oWjAdmgJMKu7dfmJjVqNjNzEJEqqGRjU1CfVnJpXWH5ORBULba3ogs55NV0ihHCpBH1kp/qIjbB2UD+fmoM1hy65dXzkOAY1PesFAOeLLjcmJydrPBwi36Dn8nPBSBoRecrehcDnI4DT2+yvP73Nun7v594dF7mVwaDI1GT5uS4p59RMu5wDaeGXoERUPuW8mrl/psKSz/cGVyhL0M+fP+/QvqFBRozq3sh2e86WEw6VsJP7MajpWdMB1Cu6HIyPj9d4OES+wRgwmZrkK3iyQX7vxEZg1XOVz58pLcCqZ5mx6ceYqal/IWEmhEUFAQAKzRZkXM/VeERE5MtMNcJgKs7wNkvkHbuu7YD8nLMd0IuN6tEIwSZrKO23c2nYdZJTAmiJQU0PklJmSCkvSCkvAChQfvNOFMiEUdn9XF+ZmuQ7BBsFkZ5serfqDYGkBdj0nmfHQx7DRkGBIUaZrZnMEnQiqlhYmxq26+yC7hplUPPixYtwdJrAmpEhGNmpvu32nC0n3DY2chyjbETkdcpMTam7D2wMpPkkJmqSP7ucVH7JeXlOb2XzID/FRkGBIVrZLOgymwURUcVC2ypK0JNSOG2FC6pVq4aoqCgAQEFBAa5cueLwfYzrVdIw6Keky/jzSqbbxkeOYVCTiLzOYFKU1pl1/IGNJc8aY4CZdOLEJu/uR5oyCkWmpizk9Bk6FRMXZrvOoCYRVSa4fhQMkdZpKyxZBcg/m6HxiPybqyXozeMi0b9NnO12wpaTbhkXOY5BTSLyOj03CiIicrs8Jz+4OLsfacogDKqmc5aqTjtAfkVZfp7K8nMiqoQwCHUXdJagu8TVoCYAjL+1qe360r3ncDUzz+VxkeMY1CQirzMYS156LIU6+7DGeRx9EzOdyJ+FRHl3P9Kccl5NBjX1KUZRfn7m92sajoSI/IV6Xk2+briiXr16tuuOdkAv1q1JLG6qHw0AyDNb8PmO024ZGzmGQU0i8rpAydSUnMhRWwwwk140vd27+5HmlB3QzVK/75OBLLpWmOp2oZnBayKqWEiLGMBkPb81J2fDfI1Z3s5SZmomJyfD7ETzWiGEKlvz852nkVug46nVfBSDmkTkdcpGQZZCvb3wM5Dmk5ipSf4srg3QqKdj+zTqZd2P/JKqWZDuGuoRAJiCjYisHmK7nX6VwQkiqpgh2IjQ5tVtt9kF3Xnh4eGIiYkBABQWFuLy5ctO3c/gG2qjXoz1S6qUrHws2XvOXUOkKmJQk4i8TtkoyOLEt2JEVcJMTdKT2/8JiCqetgkDcPs/PDse8ihl+Tk7oOtXjKoDOoOaRFS50DbKeTVZgu4Kd5Sgm4wGjO3Z2HZ77paTsLAzvVcxqOlBQogoIURdIURdAEEWC8tKiADAoJwrTNcZKHxDIyI3adobGPpR5YFNYQCGzrBuT36rdAd00idls6A0dkAnoioIUzQLyjuZDksOE0Sc5Y5mQQBwf5cGiAqxJu2cuJqF9X84l/VJzjFVvgm54AUArxXfSE5O1nAoRL7DYNJv+blg+bmPYoCZdKDTI0BMQ2DTe8DprWXXN+plzdBkQNPvKYOabBSkHzMf/7ncddsWH8e2xcdVy56a3dfTQyIiP2OMDkFQ/UgUnMs
ELBK5R1MQ3j5O62H5JXcFNaNCg/BQt4b4dPMJAMCcLSdwR9t4l8dHVcNMTc+aDqBe0eVgfDz/sIkAdaOgQpafk6ew/Jz0qGlvYOz3wCOrSpYFRwJP7rQuZ0BTF5Tl52YL3yeJiKiEMlsz5zDn1XSWMqh5+fJl5OfnO31fY3o2hslg/eyx62QKDpxNdXV4VEXM1PQgKWUGgAwAEEIUGAyMIRMBgEHXjYKUmB3oM9goiPSm/s0l1wsL2BRIZ1h+rk+lMy8vHLuOZdP3AQBqN43GyH921mJYRORnQtvWQPpPZwAAuUeuQxZaIIyMNTgqJCQENWvWxNWrVyGlxKVLl9CwYUOn7qtOdBiGtq+LZfusc3PO23YSHz3Q0Z3DpXLwL5+IvE7fQU1mBxKRFwSFAYYg6/XCPKAgV9vxkFupys85J7tuhUQE2a7nZhVoOBIi8idBdSJgjA4BAMhcM85P3qbxiPyXu0rQAeDRnk1s1zceucKGQV7CoCYReZ2y/NxSqOOyOmYHaovxZdIzIYDQ6JLbuWnajYXcTlV+LnX8PhngQhnUJCInCCFUXdDJee7ogF7shnrVUDMyGACQllOAI8kZLt0fVQ2DmkTkdUY9Z2oykOaTJAPMpEdhMSXXc1O1GgV5ABsFBQZlUDMvqwCSWT1EVEVhbWuobvNc1znuzNQUQqBrk5Jg866TnO/UGxjUJCKvM5gUmZpsFEQeItgoiPSOmZq6xUZBgcFoKvkoJiWQl8PnmoiqJqRpNESw4r3icraGo/FftWvXRnHvk2vXriE317XpfLo1KQk2M6jpHQxqEpHXKZtm6XmuMMlGQUTkSQxq6hYzNQNHVI1Q23WWoBNRVQmTAaEtY2y3c5IYQHNGUFAQ4uLibLddzdZUZmr+cvIaM2i9gN3PicjrVJmaOptTU7D+3DfxfIL0iEFN3WL388ARGhGEjGvWzCAGNYmoIude2lLuuvS1p5C+9pRqWf23b/XwiPShbt26uHTpEgBrULNp06ZO31er+ChUCzUhPdeMq5n5OHE1C81qRbprqGQHMzWJyOuUjYIKzfywRh7C8nPSu9CYkuucU1NXlEFNlp/rW2hEyTlRbiaDmkRE3ubOeTUNBs6r6W3M1CQirzOoGgXxwxp5AUs/SI+UmZo5qZoNg9xPOacmy8/1rXSzICKi8tjLvFRmb9b+ZxeYYkPLbEMVc2cHdMA6r+ZPSZcBAL+cuIYHuzZ0+T6pfMzU9CAhRJQQoq4Qoi6AID3PHUjkCIOeu5+z/NyH8LkgnWP5uW4ZRMkpOsvP9U0Z1MzN4he9ROQ889UcrYfgl+Li4mAs+nyalpaGrKwsl+5PPa9mCufV9DAGNT3rBQDniy43JicnazwcIt+g76AmEZGXMKipWyahmKaFQU1dC4lUBjWZqUlEzjNfY1DTGUajEbVr17bddrUEvV3daogo6kx/MS0X567zefEkBjU9azqAekWXg/Hx8RoPh8g3GHXcKEiN38r5DH5DSnrEoKZuKcvPCy0MauqZOlOTQU0icp75CoNnznJnCbrJaEDnxupsTfIcBjU9SEqZIaW8IKW8AKDAYOCvmwgADMq5wgr1Ni0DS559BhsFkd6FxZRcZ6MgXWH5eeBgUJOI3IWZms5zZ7MgAOimahZ0zeX7o/IxykZEXmcwBUijIGYH+hA+F6RDqu7nzNTUE1X5OTM1dS1UWX7O7udE5KD45zvZrnNOTecpg5pHjx51eR5MdkD3HgY1icjrDEZl+XmhviZPZnag7+BzQXrH8nPdYqZm4GCmJhG5wqjodm6+ngupuyo476hZsyaCg4Ntt9PT0126v5vqRyPEZH0vP3UtG8npuS7dH5WPQU0i8johBIRiOgY2CyIicgKDmrqlmlOTQU1dY1CTiFxhCDbCGF0UjLMA5hQGz5xhMBhQp04d221XS9BDTEZ0bBhju815NT2HQU0i0oSqA7pOS+skS559h56ygYmKlQ5q8u9cN1h+HjhU5edZOp6Sh4g8xlQzzHadJejOc/e8ml2b1LBd57yansOgJhFpQlWCbtbPBzbBRkG+g08F6Z0pBDAVfZCxmIH8LG3HQ26jbC7JTE19Cw41wmCwvmGZ8wpRWMDSUSJyjDqoyUxNZyk7oLu/WRAzNT3FVPkmRETuZzQaUVxkpetmQeQTdDVvK5FSaDSQWZSVkZsGhERqOx5yC6Ng+XmgEEIgJMKEnAzrWVFuVgEiYkI0HhUR+RNVUJMd0KtsypQp5a77888/y6yvaHt7OjWsDpNBwGyROJqciZSsfMRGBFe+IzmEmZpEpAmDSd0sSJ8YSNMUGwVRIOC8mrqkCmqy/Fz3OK8mEbnCVIPl574oLNiIm+qXnKcxW9MzmKlJRJow6LZREANpRORFDGrqEhsFBRbVvJqZDGoSkWM4p6Zz7GVeKpf97W9/Q7Vq1Vw6RtcmNbD3TCoAa1Bz4A21Xbo/KouZmkSkCXWmpl7Lz5mp6TNYfk56FRZTcp1BTd1g+XlgYaYmEbnCFBtqy6soTM2DLOD7hjtkZbk+V3m3piXzav7CZkEewaAmEWlC2f28UEeNgsh3CJafUyBQZWqmajYMci+WnwcWBjWJyBXCZICxeqjttvkamwW5gzuCmp0bVUdRLzgcvpiO9Fy+xrsbg5oeJISIEkLUFULUBRBksbCbIVExVfdz3WZqks9goibpFcvPdYnl54GFQU0ichVL0N3PHUHNaqFBaFvXWsIuJbDn1HWX75PUGNT0rBcAnC+63JicnKzxcIh8hzJTU19zaiqw5FljzNSkAMCgpi6x/DywcE5NInJVkCKoWcCgplu4I6gJAF0b17Bd38kSdLdjUNOzpgOoV3Q5GB8fr/FwiHyHboOaLHkmIm9iUFOXlEFNi2Slj94xU5OIXGWqoSg/Z1DTaX379rVdd1dQUzmvJjugux+7n3uQlDIDQAYACCEKlN2eiQKdkeXn5E3MmiW9Co0puZ6TqtUoyM0MouSc0Wzhe6TeqYOafL6JyHGmWuG26+ZrDGo6KyIiwnbdXUHNLo1LgpoHz6UhO9+M8GCG4tyFUTYi0oTBpMhC0WmjIMmJHLXFrFkKBGwUpEsmg+KLP2Zq6l5oZMnzzfJzInIGMzXdwxNBzdiIYLSMjwQAmC0Se0+nuuV+yYpBTSLShEHRBMGio86ugvM4+iZmapJesfxcl5Tl58zU1L8Qlp8TkYuM1UMBo/VziCWjAJY8vnc4wxNBTQDo1qRkXs1dnFfTrRjUJCJNGEyKLBQz33TJA5ipSYGAQU1dYvfzwMI5NYnIVcIgYIpVZmvmajga/+WpoGbXJiUl6L9wXk23YlCTiDShbBRUqKdGQURE3hQWU3KdQU3dYKOgwKIMauZlFUBaWF1ARI4zKTqgswTdOaWDmtJN1V7dFEHNfWdTkVvAz7/uwqAmEWnCoGwUpKPyc7D83EfxAyLplLJRUOYlYOds4HKSZsMh92CjoMBiNBkQFGoNZEsJ5OXwOScixzGo6brg4GCYiioKzWYz8vPz3XK/cdVC0aSmNWCab7bgt3P8ItpdGNQkIk0YjCUvP3ptFM
R5HDXG+DLp3YmNwNd/KbltzgPWvgjM6g7MH2xdT36JjYICD0vQichVDGq6TgjhuRJ0RRd0zqvpPgxqEpEmVJmahXrKSGAkzRe5q3SEyGfsXQh8PgI4s93++tPbrOv3fu7dcZFbKMvPOadmYGBQk4hcpQpqXmNQ01mcV9O/mCrfhIjIddPvv6vcdas/fh+rP35fteyFb77z9JBI79goiPTqxEZg1XNAZRl80gKsehaIaQA07e2NkZGbsPw88IRGlHwsy81kUJOIHMdMTffwRlBzz+nrKCi0IMjIPENX8TdIROQxzA4kIg/Y9G7lAc1i0gJses+z4yG3Y/l54FE1C8pmIJuIHGeMCoYIsoZ4LNlmWLL5BYkzPBXUbBAbjnox1sBzdn4hfr+Q7rb7DmTM1CQiryidefnD7I9waMOPAIABjz2LG/sO0GJY7sfsQN/E8nPSi8tJ1tJyR5zeat0vro1nxkRup8zUZPl5YFCVnzNTk4icIAwCphphKLhkDcQVXM1BSMOgSvai0jwV1ASs2ZrL9p0HYJ1Xs0ODGLfefyBipiYRaUIogn+SWSjkAYIBZtKjE5u8ux9pgnNqBp6QSM6pSUSuM9UMtV1nCbpzPB3ULPbLCc6r6Q4MahKRJoQiC0Va9JlFJ1l+7jv4VJBe5GV4dz/ShLL8vNDCoGYgYKMgInIHU81w23UGNZ3jyaBmN0VQc9epFBTq9HOwNzGoSUSaEAZlpqZ+XswFu5/7DmZqkh6FRHl3P9IEy88DD4OaROQOqkzNa7kajsR/hYeXBIbdHdRsUjMCNSNDAAAZuWYcucQvnV3FoCYRaUOZqanb8nP9BGuJyEc0vd27+5EmTIKZmoEmNJJzahKR69gB3XWezNQUQqiyNX85ec2t9x+IGNT0ICFElBCirhCiLoAgi0WvgRsix6nm1GTaPXmajrKBKcDFtQEa9XRsn0a92CTIzzBTM/AwU5OI3KF0UFNPFXHe4smgJgB0a6ooQT/JeTVdxaCmZ70A4HzR5cbk5GSNh0PkO5Tl59BtpiZpi+XnpFO3/1OV7V4hYQBu/4dnx0NuZzSwUVCgYVCTiNzBEBEEEWJ9D5F5hbAw89thyqBmdnY23J2cpmwWtOtkCgPPLmJQ07OmA6hXdDkYHx+v8XCIfIeyUZBus5j5BuU7+FyQnjTtDQz9qPLApjAAQ2dYtye/oup+zvLzgKAqP88yazgSIvJnQgiWoLvIZDIhJMQ676WUEjk57v0dtoyLQnSY9TX/WlY+/ryS6db7DzQManqQlDJDSnlBSnkBQIHBwF83UTFV+bmuAk7MDvQZbBREetbpEWDUMmtpuT2NelnXdxrl3XGRWzBTM/AEhxphKKpiMecVorBAp1/4EpHHMajpOk+WoBsMAl0aK+fVZAm6KxhlIyJNCEWQX+o1U5OIyJOa9gbGfg80v6Nk2U0PAE/utC5nhqbfUmVqMqgZEIQQCIkoaRDFEnQichaDmq7z9Lya3TmvptswqElEmlAFNXWVqVlCsvu5D+FzQToWGl1yvXl/NgXSAZafBybOq0lE7hDEoKbLSs+r6W7KeTV/OcF5NV3BoCYRaUJZfq6r+Q5Z8uw7+FwQkZ9Slp9b2EwvYKjm1WRzDyJykipT8xqDms7wdKZm2zrVEBlizc6/lJ6Lsyl8npzFoCYRaULZKIjl5+RxegqcE5HuKTM1zZJNYwIFMzWJyB1MNUJt1wuu5kJaeB7sKE8HNU1GAzo3qm67/cvJa24/RqBgUJOINCEMem0URD6DiZoUkPh6qgcsPw9MDGoSkTsYwoNgKJ6j12xBYXq+tgPyQ54OagLqEnTOq+k8BjWJSBPq7uf6ydQUjKQRkbdxqgXdYffzwMSgJhG5i6mGcl5N988JqXfeCGp2a8IO6O5gqnwTIiL3C4zyc2ZM+QpmAxORP2H388Aw8/Gfy123c/kJ7Fx+QrXsqdl9PT0kItIJU80w5J/JAACYr+YCzTUekJ/xRlDzpvoxCDEZkGe24ExKNi6m5aBOdFjlO5IKMzWJSBPqTE09BZyYMeUrBLPXiMhPKYOabBRERESOMrEDuku8EdQMNhnQqWHJvJosQXcOMzWJSBPCEAiZmuQz9BQ3J6qIrr4kClyqRkEWNgrSq9KZl4e3XcCGz/8AALTuURv9RrfVYlhEpAMMarrGG0FNwDqv5o4T1iZBv5xMwd0d6nnsWHrFTE0i0oQqqKnXD+F6fVz+gpmaFDD4t643yjk1makZOExBJedGhQV83onIeaqg5jUGNR0VFhZmq/rKzc2F2eyZLxhV82qeYAd0ZzCoSUSaYPk5ERGRfex+HphMQYoMXQY1icgFqkZBKbmQhXr6vOV5BoMB4eHhttvZ2Z5pttSxYXVbHsafV7KQb+Zrv6MY1CQiTaiCmiw/J0/TVeCciPROmalpliw/DxRGZmoSkZsYQowwVAu23iiUKEzN1XZAfsgbJehhwUbEhgfbbl/PzvfIcfSMQU0i0kQglJ9LTuSoMWbNUiDi644esFFQYFKWnzNTk4hcpcrW5LyaDvPWvJo1I0Ns169k5HnsOHrFoCYRaUJdfq6jE3fO4+ibdBo4JwLA1x0dMghFxh7LzwOGkUFNInKjIMW8mgUMajrMW0HNGpElmZrXspip6SgGNYlIG0LZ/VyvASe9Pi4/wUAPEfkpk8Fku14oGdQMFKZgZfk5n3cicg07oLtGi0zNq8zUdBiDmkSkCWHQ55yagiXPRKQlZiXrgqpREIOaAcNoYqYmEbmPqWao7br5GufUdJQ2mZoMajrKVPkmRETuZ1DNqckTd/IwBnqIyI+w+3lgMgUrnncGNYnIQede2lLuuryj18usr//2rZ4ekl/TJFMzk+XnjmJQk4g0IYT+GwWx/FxjLD/3CovFgpSUFGRkZCAvL0/H/89q4eHhAICkpCSNRwKg9j1AdG/r9cKGgC+MiVwiIfFas9dst33i78xH+NT/nptZCiVuGG79EG0wCNtjFEIgJCQEUVFRiI2NVX0xTEREnuGtoGYtlp+7hEFNDxJCRAGIKroZZNFRiS2Ry5SNgnQ7pyb5Dv6NeYLFYsHZs2eRnZ2t9VC8rjiw4hOi6gCRtazXjcEVb0t+QUCgaXRTrYfhk3zqf8/NhEGgeh3rh2jl13JSSuTm5iI3NxdZWVlo0KABA5tEVEbpzEtptuD8K9usNwxAvTd7qZq1UsW0KD+/ykZBDmNQ07NeAGD7mj05OVnDoRD5FtWcmjoqP8/JOWu7npd3GZmZRxEZ2VLDEQUwnrN5XEpKCrKzs2EymVC7dm1EREQEzAftjIwMAEBUVFQlW3rB9dNATor1ekxdILyGtuMhtzh89TBk0RcyrWq0UnVED2Q+9b/nZlJKXDmTYbsd16gaAOsXSFlZWbh06RKys7ORkpKCmjVrajVMIvITQjFPLyyAzLdAhBjL34FU2CjIP/DsyLOmA6hXdDkYHx+v8XCIfIey/FwP8x2mpGzDnr0P4nDS323LcnPP4Zddg7Bn74NISdmm4eiIPKM4uFC7dm1ERUUFTECTyCv4xUzAK57Ow2AwICoqC
rVr1wZQ8tpLROQIS45Z6yH4ldJBTU9NscRGQa7hpw8PklJmSCkvSCkvACjghz2iEkLop/v5hQuLsG//GKSm7rK7PjV1F/btH4MLF7718sioWKDM8+hteXnWEy/lSR8RuYeAsqKBr2GBQAihPj8q9bQXv9YWv/YSETlC5jKo6Yjg4GCYTNbiZrPZjPx8z5SGKzM1r2Xmw8Kp2RzCKBsRaUIY9NEoKCVlG5L+mAygssCsBUl/vMyMTS/inEGep8wiIiIiN1BPpqleVfS+5s/nTUSkHWZqOkYI4ZUS9NAgIyJDioKnFon03AKPHEev+CmEiDShDDj5cxOtk6f+i8oDmsUsOHlqpieHQ+Xh5z8i8jOqjD2+iAWMijI1+WUdETkqtHWs7TqDmo7z3ryaimZBmczGdwSDmkSkCfVJu39+WMvMPFpuyXl5UlN/QWbmUQ+NiFT44Y+IdIJBzcChfOvy1/MjIvIdhtCSxkAMajpOi2ZBVzLYAd0RDGoSkSaEQdF5z08zNa9f3+7V/YiIKHAo59RkTDNwqIOa2o2DiPRBhJls1y2cU9Nh3gpqslmQ8xjUJCJNCIP/Z2qazZle3Y9c4Kd/Y0TkHYmJiRBCIDExUeuh2HU97zpyzblaD4O8gVFNInIjgyKoKZmp6TAtMjWvZjCo6QgGNYlIE+ryc//M1CwszPbqfuQolp+T95TuWvzQQw9BCIFPPvmk0n3vuOMOCCGwfPlyACUBtoouSj9u2IIXpn6Afvc9htj6zSGEQK9evao07pUrV2LQoEGoVasWQkJC0KBBAwwbNgw7d+5UbXfw4EGMHz8eHTt2VG3bv39/LF26tNwvp06ePInHH38crVu3Rnh4OOLj49GjRw989tlnVeoiunnzZhiNRggh8Morr1TpMRXr3bt3hb/DMWPGOHR/VTFlyhQIIbBx40an7yMzPxMn007CbCn58Hkl+wr+TP0TJ9NOIjOfX4zpGWOaROROhlBlpmahhiPxD1OmTFFdtm0rafL6888/l1nvLjWUHdCzWH7uCFPlmxARuZ8Qeuh+7mzQjME2r/PbvzHyVxMnTsT//vc/zJkzB0888US52506dQrr169HnTp1cNddd6nWtW/fHsOHD6/0WDPnLsSK1T8iNDQEzZs2xfXrqZXuY7FY8Pjjj2POnDlo0KAB7rnnHtSoUQPJycnYuXMn9uzZg+7du9u237NnD5YvX47u3bvjlltuQXR0NC5duoRVq1Zh5MiRePjhh/H555+rjvHrr7+iT58+yMnJwcCBA3H33XcjPT0dq1atwmOPPYYlS5Zg7dq15TY/ycjIwOjRoxEeHo7MTOcDeaNHj0bjxo3LLO/QoYPT9+kp13Ov40LmhXLXZxdk43TBadSNrIvqodW9ODLylooaBREROUqZqck5NX1XLTYKchqDmkSkCVX5uZ/OqWk0hnl1P3IQGwWRhnr37o2WLVti37592Lt3Lzp16mR3u7lz50JKibFjx8JkUp+WdejQoUpZAC8++zje/PtEtG7eGGczjWjS1v6xlKZPn445c+Zg1KhRSEhIQHBwsGp9QUGB6vYDDzxgN7MxPT0d3bt3xxdffIFnnnkGXbt2ta2bMmUKsrKykJiYiNGjR9uWv//+++jatSvWrVuHLVu24LbbbrM7xueeew5paWmYNGkSJk+eXOljKs+YMWPQu3dvp/f3lsz8zAoDmkoXMi8gyBCEyOBID4+KvE311sWoJhG5SIQyqOmI0uddhw4dwuLFiwEAbdu2xX333eeR4yozNa9mMlPTESw/JyJN6CFT02Ry7sOks/sRkX+ZMGECAGDOnDl21xcWFmL+/PkQQmD8+PFOH6dH185o16oZjEZj5RvDGoicNm0a6tevjzlz5pQJaAJAUFCQ6nZoaKjd+6pWrRruvPNOAMCxY8dU606cOAEAGDZsmGp5REQE+vXrBwC4cuWK3ftdsWIF5s+fjxkzZqBu3bpVeFSesWHDBkycOBFt27ZFtWrVEBYWhhtuuAFTp05Fbq56jsvGjRtj6tSpAIA+ffqUO11Aea7k2P9duGt78hPM1CQiN1LNqclGQQ5TfuFsNnvu96eaU5OZmg5hUJOINKEur/LPs/aQkHpe3Y9c4Kd/Y+TfRo8ejeDgYHz11VfIzi47l+6aNWtw/vx59O/fH02aNHHPQavwp75y5UpkZmbigQcegMViweLFi/H2229j5syZOHDggEOHy87Oxs8//wwAuPHGG1Xr2rVrBwD4/vvv7e4TERGBHj16lLnPy5cvY8KECRg+fDgefvhhh8bjbu+88w7WrVuHDh064LHHHsP48eMRHByMKVOmYNCgQSgsLJmf7Pnnn8ftt98OwPrcv/baa7ZLZXLNucgucGy+5eyCbDYP0iH1nJp87yIi17D83DXeC2qy/NxZLD8nIk2ogpp+Wn6el3feq/uRg1h+ThqrVasWhg8fjkWLFmHRokVlyreLMzgnTpxod//9+/fbLT8fPny4S/NB/vrrrwCs2Zht2rTB6dOnVetHjhyJhQsXIjw8vMy+x48fxxdffIHCwkIkJyfj+++/x4ULFzBp0iTcdNNNqm3feOMNbN++HWPGjMGiRYvQtm1bpKen47vvvoPZbMa3335rNwtz4sSJsFgsmD17ttOPUSkxMdFu456qlPbPmjULTZo0KZNt+eqrr+KNN97A4sWLcf/99wOwBjVTU1OxadMmh0veswqc66iaVZCFUJP9LFryT5xTk4jcyRBaUsVhYaamw5RVMMovMt1N1SiI5ecOYVCTiDQhDCVvEP4a1DSbnWtc4ex+5Ap+MtRC45e+r3wjH3Hq7SEeud+JEydi0aJFSEhIUAU1L168iNWrVyM+Ph5333233X0PHDhgN3OycePGLgU1L1++DAB499130bFjR1vA8fDhw3jqqaewZMkSREZGIjExscy+x48ft5VYA0BwcDDee+89vPDCC2W2bd26NX799Vc8+OCDWLVqFVatWgXAGkx9/vnnVY2Iis2bNw8rVqzAN998g/j4eKcfo9KCBQvsLq9KULNp06Z2lz///PN444038MMPP9iCmq6wSOfeB53dj3wY59QkIjdSZ2qy+7mjvJWpWS3UhGCjAfmFFmTnFyI734zwYIbrqoLl50SkCVWjID89aeecmj6OiZrkA/r27YtmzZph27ZtSEpKsi2fP38+zGYzxowZU2b+ymKjR4+GlLLMxV7DHkcUZxqEhYVh1apV6Nq1KyIjI9G1a1esXLkSkZGR+Pzzz3H+fNms8oEDB0JKifz8fBw/fhyTJ0/Gyy+/jGHDhiE/X51ZsG/fPtxyyy3IycnBli1bkJGRgbNnz2LatGn44IMP0K1bN6Slpdm2P3XqFJ5//nnce++9bp2If8OGDXZ/j1WRlZWFt956C126dEF0dDQMBgOEEKhZsyYA2P0dOcMgnDsld3Y/8l3q8nPtxkFE+qBsFCTzzJAWvrA4wluZmkII1FCUoDNbs+p4JkREmlCXV/lnpkn16rd4dT8i8j/KJkAJCQkArF/kzJs3z+UGQfZV/mGlevXqAIDu3bujdu3aqnV1
6tRBt27dYLFYsHv37nLvIygoCM2aNcO//vUvTJs2Dd999x1mzJhhW282m3HffffhypUrWLVqFXr16oXIyEjUr18fL730Ep555hkcO3YM//nPf2z7PProowgLC8OsWbMcfdAeUVBQgL59+2Ly5MnIzc3F/fffj0mTJqnmyczLc8+8VxFBEV7dj3wXy8+JyJ2EQUCEFAXmJCDzmK3pCG9lagLqZkFXOK9mlTGflYg0oep+7qffGEZGtkRMTFekpu6q8j4xMd0QGdnSg6Mie/w1G9jfeaqk29+MHTsW//rXv7Bw4UL8+9//xpYtW/Dnn3+ib9++aN68uesHcDAruVWrVgCAmJgYu+uLg545OTlVur9BgwZh0qRJ2LhxI/7+978DAP744w8cP34cnTp1KhM4BazdwT/88EPs2bPHtmzv3r1IS0tDrVq17B7nzTffxJtvvom7774by5cvr9LYXLFixQrs2rULo0ePLlOKf/HiRVUZvqtCTaEIDwp3qFlQeFA459PUI+X/s5+eHxGRbzGEmVBYFMy05JhVJelUMe8GNRXNgjIY1Kwq/jUTkTZ0UH4OAE0aP419+8cAqEq2qQFNGj/l4RGRDRsFkY+Ij4/HsGHDsGTJEixfvhzLli0DUH6DIE/r168fAOD333+3u754eePGjat0f8Ul2MoT/+IMxqtXr9rd58qVKwCsc3IWe+SRR+x2iT927Bg2b96MDh06oHPnzujYsWOVxuWq48ePA7A2Tipt06ZNdvcpLlNzpkStVlgtnC44XfmGiu1Jf5ipSUTuZgg1oRDW92U2C3KMt8rPgVLNgrJYfl5VLD8nIk2oMjX9tPwcAGJje6JN6zdR+cupAW1av4XY2J7eGBYR+ZgJEyYAAKZPn45ly5ahZs2aGDFihCZjad++PXr27ImkpCRbSXyxhIQEJCUloVmzZujSpYtt+datW1FQUFDmvq5cuYKXXnoJADBkSElm7g033ICYmBicOXOmzDFSU1Px/vvvAygJsALAjBkzkJCQUOYyduxY2/0nJCTgqafUXw717t0bQgi7Hc5dURzULX2/J06cwIsvvmh3nxo1agAAzpw54/DxIoMjUTeybDd4e+pG1kVkMOdn1iP1nJqMahKR60SYogN6DoOajtCq/JyZmlXHTE0i0oQyE8Hfy6vq1r0PoaH1cPLUTKSm/lJmfUxMNzRp/BQDmlriB0PS2IABA9CkSRPs2mWdruLpp59WZSm6YuuOX5EwfyEAIDPf+rd+7NgxVUOh0uXTc+fORa9evTBhwgQsXboU7dq1w+HDh7F69WqEh4cjMTFRlZ3w9NNP49KlS+jZsycaNmwIo9GIU6dOYfXq1cjJycHw4cPx6KOP2rYPCQnBhx9+iLFjx2LChAn4+uuv0bFjR1y/fh0rV67ElStX0L17d4wbN87lx2+xWL8YU37wcIehQ4eiefPm+OCDD3Dw4EF07NgRZ86cwXfffYchQ4bYDVz26dMHBoMBkyZNwqFDh2yl/K+88kqVjlk9tDqCDEG4knPFbil6eFA4aoXVYkBTx5ipSUTuZlA2C2JQ0yHKcyFvlp8zU7PqGNQkIk0ou58XfyD1Z7GxPREb2xOZmUdx/fp2mM2ZMJkiUb36LZxDUyOC5efkQ4QQGDdunC24VZy56Q7HT57Ggm9XqZZdvnwZCxYssN0uHdRs1aoV9u7di6lTp2LNmjX46aefEBsbiwcffBCvvvoq2rRpo9r+hRdewPLly7Fv3z788MMPyM/PR82aNdG3b1+MGjUK9913X5n/udGjR6NJkyb48MMPsWPHDmzatAkhISFo1aoV/va3v+H5559HSEgIXCGlxO+//47GjRuje/fuLt1XaREREfj555/x0ksvYePGjdiyZQuaNm2KV199FX/729/wzTfflNmnTZs2WLBgAd5//33MmjULubm5AKoe1ASsGZuRwZHINeciqyALFmmBQRgQERTBOTQDgfLfiFFNInID5RyaLD93jPILU0+Xn7NRkHMY1CQiTRgMJd96+XP5eWmRkS0ZxPRF/FxIHlaVMtHJkydj8uTJlW43ZswYVZZlpds/dC/GDO9rvRHdAIioWaX9GjRoUKY0vDyjRo3CqFGjqjymYrfddhtuu+02h/dTquj3cfDgQaSkpOD1118vk6npSDl6ecdo0KABvvzyS7v7lPecP/zww3j44YerfOzyhJpCGcQMQOryc+3GQUT6oQpq5rD7uSOUmZoWiwUWiwUGg2dmcayhzNRkULPKOKcmEWlD6KNREPkwZmoS6d6mTZsQHx+vKn0n8meC50dE5GYilJmazhJCeK1ZkGpOzUyWn1cVMzWJyCum339XueuunDpRZv0L33zn6SEREZGfe+aZZ/DMM89oPQwi92GmJhG5mTJTk3NqOs5kMtmCmWazGUFBQR45jjqoyUzNqmKmJhER6R8/GRIRkR/gfNBE5G7KRkHsfu44b3VArx4eZCs0S80uQEGhfqZo8yRmahKRV5TOvLxy5hQW/uNpAECN+g0xZvosLYZFusYPhkRE5F9Uc2pa+IUcEbmOjYJc463yc5PRgNjwYFvn85SsfMRX49zalWGmJhFpguEm8ipmahIRkR9Qz6mp4UCISDcMYYpmN8zUdJi3MjUBdbMglqBXDYOaRKQNToRPnsYSPiIi8jfKty6eHxGRGygbBUlmajrMW5maAJsFOYNBTSLShGrOKJ60ExEREYGnR0Tkbqryc2ZqOsy7mZolQc1rzNSsEgY1iUgjikxNDUdBgYJ/ZURE5PsEK1mIyM3UQU3PZhrqkTeDmjVZfu4wNgryICFEFICooptBFgu7VxHZsLyKPI3V50RE5G9KvXdJKdkRnYhcIoKN1tcWCcj8QshCCWHk60pVsfzctzFT07NeAHC+6HJjcnKyxsMh8h3qE3QGNcmzmO1CRET+QAjBZkFE5FbCIFTzarIDumOYqenbGNT0rOkA6hVdDsbHx2s8HCJfwvIq8jBmthARkT9iNQsRuZmyBJ3NghzDTE3fxvJzD5JSZgDIAAAhRIHBwBgyUTEmahIRERGVJUTJqRFjmkTkDoZQI4rDcWwW5Bg2CvJtjLIRkTaUpVWMapKn8VMhBQz+rRP5OzYLIiJ3Ywd057H83LcxqElEmhDK2iqesJMHsLECERH5JVazEJGbGTinptO0Kj+/lpnPL7aqgEFNItIGJ8Enb+LfmP+7nATsnA1ses/683KS1iMKKFOmTIEQAhs3btR6KES6x0ZBRORuQjmnZo5nA3N6481MzdAgIyJDrMczWyTScgo8ejw9YFCTiDShTqLjGTt5ADM19eHERmD+YGBWd2Dti8CGN6w/Z3W3Lj+xUesRAijbsfihhx6CEAKffPJJpfvecccdEEJg+fLlAIDExETb/ZV3Ufpxwxa8MPUD9LvvMcQ2aAkhBHr16lWlca9cuRKDBg1CrVq1EBISggYNGmDYsGHYuXNn1R+8A+w9tpCQEDRp0gRjxozB4cOH7e5nNpsxd+5cDBgwAHFxcQgODkZcXBzuvPNOJCYmVjlz4urVq6hdu3alv6OtW7fi7rv
vRuPGjREaGoqGDRti8ODBWLt2rVOPm8gRQlXMwnMkInIdMzWd581MTYAl6I5ioyAi0gjniyKiSuxdCKx6DpAW++tPbwM+HwEMnQF0GuXdsVVi4sSJ+N///oc5c+bgiSeeKHe7U6dOYf369ahTpw7uuusu1br27dtj+PDhlR5rZsJCrFi9DqGhIWjetCmuX0+tdB+LxYLHH38cc+bMQYMGDXDPPfegRo0aSE5Oxs6dO7Fnzx5079690vtxlvKxpaWlYePGjViwYAEWLVqEn3/+WXXsc+fOYdiwYdi3bx/i4+MxZMgQ1KlTB5cuXcKaNWuwbt06zJ49GytWrEB8fHyFx33ssceQlZVV4TaffPIJnnzySURERGDEiBGoX78+zp07h6VLl2LNmjV44403MHnyZJd/B0TlYaYmEbkb59R0njczNQFrs6BT17IBWDugN4/z+CH9GoOaRKQJVaYRT9jJ0/ip0P+c2FhxQLOYtACrngViGgBNe3tjZFXSu3dvtGzZEvv27cPevXvRqVMnu9vNnTsXUkqMHTtWddIMAB06dMCUKVMqPdaLzz2BN/8+Aa2bN8bZDIEm7W6udJ/p06djzpw5GDVqFBISEhAcHKxaX1Dg2XKn0o+t+HewYMECTJo0CRs2bAAAZGdnY9CgQTh06BBGjx6NWbNmITw83LZfdnY2nnzySSxYsAB33XUXtm3bVuaxFFu4cCGWLl2KWbNm4cknn7S7TUFBASZNmoTQ0FDs2bMHrVq1sq17+eWX0bFjR7z55pv4+9//jpCQELv3QeQy1ZyafP8iItcxqOk8bwc1manpGJafE5E2VFkIlQQtiJzC8nO/tundygOaxaTFOtemj5kwYQIAYM6cOXbXFxYWYv78+RBCYPz48U4fp0fXzmjXqpmqPKoi6enpmDZtGurXr485c+bYDQIGBQVV6b7OnDmDdu3aITg4GF988YVD41YSQtgCjbt27bIt/+CDD3Do0CHccsstmDdvniqgCQDh4eGYN28ebrnlFuzevRuzZs0qd5zPPvssxo0bh0GDBpU7jpSUFKSlpaFly5aqgCYAtGnTBi1btkROTg4yMzOdfahEZVw+na665CsCDmlXclTrCvI5Fx4ROU45pybLzx3j7fLzGqWaBVHFGNQkIk2oMzWZhUAexr8x/3I5yVpa7ojTW32uedDo0aMRHByMr776CtnZ2WXWr1mzBufPn0f//v3RpEkTr41r5cqVyMzMxAMPPACLxYLFixfj7bffxsyZM3HgwIEq38+BAwfQo0cPnD17FqtXr8bDDz/s0riKpyJRvj8UB4RfeeUVGAz2T1sNBoOtHHz27Nl273fMmDGIjo7GBx98UOEY4uLiUKtWLRw9ehTHjh1TrSte1qFDB9SoUaPqD4yIiEhjhtCSwJxkpqZDvJ+pWRLUZKZm5Vh+TkTaYPU5eRobBWlvSrR3jzfLhTkgp6S5bxxFatWqheHDh2PRokVYtGgRxowZo1pfHLCbOHGi3f33799vt/x8+PDh6NChg9Pj+vXXXwFYszHbtGmD06dPq9aPHDkSCxcuLJMVqfTTTz9h5MiRiIiIwJYtW9C+fXunxwNYA4/FWZbdunUDAJw9exZnzpyByWRC7969K9y/T58+MJlMOHLkCJKTk1Vza3744YfYuHEj1q1bh2rVqiElJaXc+xFCYObMmXj44YfRuXNnjBgxAnXr1sX58+exbNkytGvXDl9//bVLj5WotLhG1VS306/mIDfLOgVEVI1QhClKEa9lVy0jm4hISVV+nsuMb0do2yiImZqVYVCTiDQhwExNItK/iRMnYtGiRUhISFAFNS9evIjVq1cjPj4ed999t919Dxw4YDdzsnHjxi4FNS9fvgwAePfdd9GxY0csWrQIbdu2xeHDh/HUU09hyZIliIyMRGJiot39v/jiCzz66KNo3rw51q5di4YNGzo8BmXAtrhR0P79+xEWFoY333wTgPV3BAA1atRAWFhYhfcXFhZma3R0/vx5W1Dz8OHDePnll/H444+jf//+VRrbvffei7p16+LBBx/EwoULbcvj4+MxduxYNG3a1NGHS0REpClV9/Mcz86brTfM1PRtLD8nIm0Idj8nL+LfGGmkb9++aNasGbZt24akpJLy+Pnz58NsNmPMmDHlzl85evRoSCnLXEpnfDqqOMsgLCwMq1atQteuXREZGYmuXbti5cqViIyMxOeff47z58+X2fejjz7CI488gm7dumHbtm1OBTQBa8B26tSpmDp1KmbOnImUlBSMGjUKu3fvtnU+t1eOXpHi7XNzcwFYm/6MGjUKderUwbvvvlvlsX3xxRfo378/br31ViQlJSE7OxtJSUno168fnn76aTzwwAOOPFQi1/Dti4jcQN0oiJmajmBQ07cxU5OINFHVD6lETuPfmPacLem+nORcKfmTO4G4Ns4d00OKmwBNmjQJCQkJmD59OqSUmDdvnssNgpxVvXp1AED37t1Ru3Zt1bo6deqgW7duWL9+PXbv3o169eqp1m/evBlSSvTr1892P84YPXp0uZmgyrEAwNWrV5GTk1NhtmZOTo6trLxWrVoAgH//+9/Yt28fNmzYgMjIyCqN6+jRo3j00Udx00034fPPP7fN49m6dWt8/vnnOHLkCL799lts3Lix0pJ4Iqfx7YuI3EzVKCiDJc2O8H6jIMWUIyw/rxQzNYlIc8zUJM/j35hfiWsDNOrp2D6NevlcQLPY2LFjERQUhIULFyI/Px8///wz/vzzT/Tp0wfNmzf3+niKu3rHxMTYXV8crMzJySmzbu7cuejWrRumTp2KV1991WNjBICGDRuiYcOGMJvN2LhxY4Xbbty4EWazGTExMWjWrBkAYO/evZBSonfv3hBC2C7FTZm2bdsGIYTq97Bu3ToUFBTg9ttvL9OYyGAw4LbbbgMA7Nmzx30PlKgUxjSJyN1EkAEwKCrlzBYNR+NfmKnp25ipSUSaYPdz8jh+KvRvt/8T+HwEIKtw0i0MwO3/8PyYnBQfH49hw4ZhyZIlWL58OZYtWwag/AZBntavXz8AwO+//253ffHyxo0bl1kXExODH3/8EUOGDMEbb7yBvLw8h0q7HTVhwgS8+uqreOuttzBw4EC7Wf4WiwVvvfUWAOD++++3BSPvuOMO1KxZs8z2mZmZ+OabbxAfH4+77rpL1RApL8/64eHKlSt2x1O8PDg42O56IvdQBB40HAUR6YcQAoYwIyxZ1qCcJdcMYyTfy6rC25ma1UJNCDYakF9oQXZ+IbLz2a2+IszUJCJtCJ6wE1EFmvYGhn5kDVhWRBiAoTOs2/uwCRMmAACmT5+OZcuWoWbNmhgxYoQmY2nfvj169uyJpKQkJCQkqNYlJCQgKSkJzZo1Q5cuXezuHxUVhbVr16Jfv35477338Nxzz5XZJjExEUIIl+f//Otf/4p27dph69atGD9+fJns0ZycHEyYMAFbt25FfHy8Knv0qaeeQkJCQpnL22+/DQBo3rw5EhISMGPGDNs+t956KwBg8eLF+O2331TH2r9/PxYvXgwhBPr27evS4yKqkD
J2z5MkInITdbMgBsqqytuZmkIIlqA7gJmaRKQ9ZmqSh3GKAz/V6REgpiGw6T3g9Nay6xv1smZo+nhAEwAGDBiAJk2aYNeuXQCAp59+2m3Zflt37ELC/AUAgMx869/6sWPHVAHF0vNXzp07F7169cKECROwdOlStGvXDocPH8bq1asRHh6OxMREVWZCaeHh4fjuu+8wcuRIzJgxA7m5uZg9e7Ytk9JisWbYKj8IOCMiIgJr167F0KFDMW/ePKxevRqDBw9G7dq1kZycjNWrV+PixYuoVq0aVqxYUWYOUEd17doVY8eOxfz589GlSxeMGDECjRo1wqlTp7B8+XLk5+fj+eefR7t27Vw6DhERkbep5tVkULPKvB3UBKwl6BfTrI0PWYJeMQY1iUgTLD8nT2MzKp1o2tt6uZwEnNgE5GUAIVFA09t9dg5Ne4QQGDduHF555RUAJZmb7nD85Gks+HaVatnly5exYMEC2+3SQc1WrVph7969mDp1KtasWYOffvoJsbGxePDBB/Hqq6+iTZvKf7ehoaFYtmwZHnjgAXz22WfIy8vDvHnzYDAYcPDgQQBwS6fw+vXrY9euXViwYAG+/vprrFy5EikpKbbA6dChQ/HZZ5+VaXrkrLlz5+K2225DYmIifvjhB2RkZKBatWq2IDC7n5N38RyJiJxz7qUt5a67MutAmWX1377Vk8PxW94uPwfUzYKuZuYjyCtH9U8MahKRNlTl5zxhJw/jn5j/i2vj00HMqmQDT548GZMnT650uzFjxjhUtj3moXsxZngf641q9YHIWlXar0GDBmXKz8szZcoUTJkypczy4OBgLF26tMzyTZs2oUuXLujfv796rA4+tmJBQUEYP368qlv8+vXrMWjQIBw/frzCrNLSGjduXOHzVVw272rpPJGzVNXnfP8iItKUVpmaxa5m5qGOV47qnxjUJCJNCOUpO0/YyROYqUmkibS0NBw4cABLlizx6HH69euHTz75BOPHj8cdd9yBDRs22Dq3E/k1vn0RkRuUzry89r8/kHPA2vAu9oFWCO8Qp8Ww/I7yi1NvBTXVc2rmoQ674ZSLQU0i0oaq+pxRTSIivYiOjvZaeda4ceNgMplw8uRJbN26FUOHDvXKcYmIiPyNMCgq5Qr5+auqlJma3jq/qaXK1MwHqnnlsH6JQU0i0oR6vkO+qZKHMXBOehbgWV2jR4/WeghE7sVTJCLyBKPixYVBzSrTovxcPadmHoOaFWASKxFpRPFNId9TyRNYfk5ERH6J719E5H5CEdSURc32qHJaNAoqPacmlY9BTSLShOAs+ORhBZcu2a7nnTiBvGPHNBwNERFR1TBRk4g8QRhLwj8sP6+60pma3pg6rXp4SaZmanaBx4/nzxjUJCJtsPs5eUjWjh04/fAoXJn+gW1Z3uHDODF0GE4/PApZO3ZoODoiT+PrKZHfUyVq8n+aiNzEwPJzZxgMBtXUad7I1gwPLskOzSnwTnaov2JQk4g0we7n5AmpixfjzLjxyN692+767N27cWbceKR6uCszkXexVJVIt3iORERuoio/Z1DTId5uFhQRUnK8rDwGNSvCoCYRacOgan+u3ThIN7J27MDFf70GVDZHkMWCi6/+ixmbRERERBQ4VI2COKemI7zdLChMmamZ753mRP6KQU0i0oRQNQrimyq57urMWZUHNItZLLg66xPPDoiIiMhV/N6XiNxEGJSNgvji4ghvNwsKDyo5XnZBoVfm8fRXDGoSkTYEu5+T++QdO1ZuyXl5sn/9lc2DiIjI5wjO0ENEnqBoFMQ5NR3j7UxNk9GA4KLnS0qggDlA5WJQk4g0ITgJPrlR1o6dXt2PiIjIczhPLhG5H+fUdJ4yU9MbQU1AXYLOaTXLZ6p8EyIiT1BmavJNlVxjycr06n5ERERERH5FOacmy88rNGXKlHLXzZo1y6HtnRURbERaTgEAIK9QIopfeNnFTE0i0oRgbRW5kSEi0qv7EREReYzycyvPkYjITdSZmqxn9nWqTE32CioXMzWJSBuqmCbP2Mk1ET26e3U/8r7j14/jl0u/IDM/E5HBkehWuxuaV2+u9bACxpQpUzB16lRs2LABvXv31no4RLqmzsXhORIRuYcwlOS0sfy8YqUzL+fMmYPz588DAMaPH4/69et7fAzhwSXhujw+X+VipiYRaUKoopp8kSbXhLRogfCbb3Zon/AuXRDSooWHRkTusvPiToxZOwYjVo7A27vexn/3/xdv73obI1aOwJi1Y7Dzom/MiyqEUGWgP/TQQxBC4JNPPql03zvuuANCCCxfvhwAkJiYaLu/8i5KP27YjBemfoB+9z2G2IatIIRAr169qjTulStXYtCgQahVqxZCQkLQoEEDDBs2DDt3eub3au+xhYSEoEmTJhgzZgwOHz5sdz+z2Yy5c+diwIABiIuLQ3BwMOLi4nDnnXciMTGx0k6kV69exT/+8Q+0bt0aYWFhiImJQceOHfHiiy9WOubNmzfDaDRCCIFXXnnFqcdN5CyeIRGR2yjLzxkkc4gQ3p86jXNqVg0zNYlIG4JzgpB71XzqSZwZNx6wVKGcxmBAzSef8PygyCVLjy3F1B1TYZH2n9M9yXvw2I+PYUqPKRjRYoSXR1exiRMn4n//+x/mzJmDJ54o/2/t1KlTWL9+PerUqYO77rpLta59+/YYPnx4pceaOWcBVqxeh9DQEDRv2gTXr6dWuo/FYsHjjz+OOXPmoEGDBrjnnntQo0YNJCcnY+fOndizZw+6d/dcJrPysaWlpWHjxo1YsGABFi1ahJ9//ll17HPnzmHYsGHYt28f4uPjMWTIENSpUweXLl3CmjVrsG7dOsyePRsrVqxAfHx8mWPt27cPd955J65du4YBAwZg+PDhyM3NxYkTJ7Bo0SK888475Y4zIyMDo0ePRnh4ODIzOQcveQnLz4nIA1Tl55xT0yFaBDXDVUFNPl/lYVCTiDRROtNISllmGZEjInr0QJ1pU3HxX69VHNg0GFDn9WmI6NHDe4Mjh+28uLPCgGYxi7Rgyo4pqBNZB93r+M50Ar1790bLli2xb98+7N27F506dbK73dy5cyGlxNixY2EyqU/LOnToUKWJ5198/km8+fcJaN28Mc6mA01u6FLpPtOnT8ecOXMwatQoJCQkIDg4WLW+oKCg0vtwRenHVvw7WLBgASZNmoQNGzYAALKzszFo0CAcOnQIo0ePxqxZsxAeHm7bLzs7G08++SQWLFiAu+66C9u2bVM9luvXr2Po0KHIz8/Htm3bygRqK3uczz33HNLS0jBp0iRMnjzZDY+ciIhIIwZlpibn1HSEFkHNCFX5uVcO6ZdYfk5EvoEl6OQGMf/3f2g4NwHhXewHdcK7dEHDuQmIGTnSyyMjR80+MLvSgGYxi7Tg0wOfenhEjpswYQIA6zxM9hQWFmL+/PkQQmD8+PFOH6dH185o16oZjEZj5RsDSE9Px7Rp01C/fn3MmTOnTEATAIKCgqp0X2fOnEG7du0QHByML774wqFxKwkh8OSTTwIAdu3aZVv+wQcf4NChQ7jlllswb
948VUATAMLDwzFv3jzccsst2L17d5mOpP/5z39w/vx5vPnmm3YzTyt6nCtWrMD8+fMxY8YM1K1b1+nHRkRE5AuEkXNqOkv78nM+X+VhpiYRaUcIWzBTQoJ5muQOET16IKJHD+QdO4asHTthycqEISISET26cw5NP3H8+nHsSd7j0D67k3fj+PXjPtU8aPTo0Zg8eTK++uorTJ8+vUxAbs2aNTh//jzuuOMONGnSxGvjWrlyJTIzM/H444/DYrFg8eLFOH78OKKiotCrVy+0b9++Svdz4MABDB48GBkZGVi9ejX69+/v0riKPyQoPzgUB4RfeeUVGAz2v4s3GAyYPHkyhgwZgtmzZ+P555+3rfvqq69gNBoxatQoHD58GOvXr0d2djaaNWuGgQMHIjIy0u59Xr58GRMmTMDw4cPx8MMPIzEx0aXHRuQI5bzj/M6XiNxGOacmy88donn5Obufl4tBTSLSjIAo6XzO91Vys5AWLRjE1NiNC2706vFGrHR+Xs2Dow+6cSRWtWrVwvDhw7Fo0SIsWrQIY8aMUa0vDthNnDjR7v779++3W34+fPhwdOjQwelx/frrrwCsWYpt2rTB6dOnVetHjhyJhQsXlgnCKv30008YOXIkIiIisGXLlioHQssjpbRlWXbr1g0AcPbsWZw5cwYmk6nSjut9+vSByWTCkSNHkJycjPj4eFy/fh1//vknWrZsiSlTpuDDDz9UfRCpUaMGFi5ciMGDB5e5v4kTJ8JisWD27NkuPS4ip/BbXiLyANWcmsz8cwgzNX0Xg5pEpB0BWzDTW28ORETeNHHiRCxatAgJCQmqoObFixexevVqxMfH4+6777a774EDB3DgwIEyyxs3buxSUPPy5csAgHfffRcdO3bEokWL0LZtWxw+fBhPPfUUlixZgsjIyHKzE7/44gs8+uijaN68OdauXYuGDRs6PAZlwLa4UdD+/fsRFhaGN998E4D1dwRYg49hYWEV3l9YWJit0dH58+cRHx9ve5x//vknPv74Y7zzzjt45JFHIKXEF198gUmTJmHkyJHYu3cv2rRpY7uvefPmYcWKFfjmm2/sNh4iIiLySwZlUJNzajpCk0zNIM6pWRUMahKRZoQQigRNBjWJSH/69u2LZs2aYdu2bUhKSrIFz+bPnw+z2YwxY8aUO6/j6NGjPVL2XFhoPTMOCwvDqlWrULt2bQBA165dsXLlSrRs2RKff/453nzzTdSrV0+170cffYQVK1agZ8+eWLlyJapXr+7UGJQB26CgINSpUwejRo3CSy+9hLZt2wKwX45ekeLtc3NzVY+zsLAQL7zwAv7xj3/Ytv373/+Oixcv4oMPPsCHH36ITz+1zsl66tQpPP/887j33ntx3333OfXYiNyL50dE5B7KTE0w888hmjQKCmGmZlUwqElEGuKcUUR65mxJ9/Hrx50qJV82bJlPzakJwNYEaNKkSUhISMD06dMhpcS8efNcbhDkrOJAZPfu3W0BzWJ16tRBt27dsH79euzevbtMUHPz5s2QUqJfv35OBzSBqgVs69SpAwC4evUqcnJyKszWzMnJQUpKCgBr2T8A1fhGjCj79zRixAh88MEHqsZEjz76KMLCwso0HCLyKmUcn+dHROQmqkZBnFPTIdqXn3vlkH6J3c+JSDOq5BtGNYmoSPPqzdE5vrND+9wcf7PPBTSLjR07FkFBQVi4cCHy8/Px888/488//0SfPn3QvLn3x9yqVSsAQExMjN31xcHAnJycMuvmzp2Lbt26YerUqXj11Vc9NkYAaNiwIRo2bAiz2YyNGzdWuO3GjRthNpsRExODZs2aAbAGRatVqwbA/mO19zj37t2Ly5cvo1atWhBC2C5jx44FALz55psQQmD48OGuP0CicjCmSUQeYWCmprM0bxTE56tczNQkIu0o3xx42k5ECo+3fxyP/fgYLLLyOZ8MwoDH2j/mhVE5Jz4+HsOGDcOSJUuwfPlyLFu2DED5DYIcZlF8fZ+fXenm/fr1AwD8/vvvdtcXL2/cuHGZdTExMfjxxx8xZMgQvPHGG8jLy8O7777r+JiraMKECXj11Vfx1ltvYeDAgXZL0S0WC9566y0AwP3336/qkt63b18sX74chw4dQrt27VT7HTp0CID6cT7yyCPIzi77Ozx27Bg2b96MDh06oHPnzujYsaM7Hh4REZHXCBMbBTlLk0xNzqlZJQxqEpHXTL//rnLXzRg1ssyyF775zpPDISIf1r1Od7zW4zVM3TG1wsCmQRgwpccUdK/T3Yujc9yECROwZMkSTJ8+HQcOHEDNmjXtlkQ7JC8DyLgE5GeWLMvPsP4syLGuD4kqs1v79u3Rs2dPbNu2DQkJCaoS+ISEBCQlJaFZs2bo0qWL3cNGRUVh7dq1GDZsGN577z3k5eXho48+Um2TmJiIsWPHujwv6F//+ld8/fXX2Lp1K8aPH4///ve/qjL0nJwcPP3009i6dSvi4+PLZI8+9dRTWL58Od544w3ceeedtozN1NRUvP766wCABx54wLb9jBkz7I4jMTERmzdvtgVzibyGcQcichdVpiYbBTmCmZq+i0FNO4QQkwDcA6AVgDwAOwFMklIe0nRgREREAeSeFvegbmRdfHrgU+xO3l1m/c3xN+Ox9o/5fEATAAYMGIAmTZrY5m98+umnERwc7PwdZl0F0s4CALbu2oeEr5YDADKzrFmGx06cwphRfwGCIwBTSJnA4ty5c9GrVy9MmDABS5cuRbt27XD48GGsXr0a4eHhSExMhNFoRHnCw8Px3XffYeTIkZgxYwZyc3Mxe/Zs20m/xWL9sGQyuXaqGRERgbVr12Lo0KGYN28eVq9ejcGDB6N27dpITk7G6tWrcfHiRVSrVg0rVqwoMwdo//798cwzz+Djjz/GDTfcgKFDhwIAvvvuO5w7dw7Dhw/HI4884tIYidyuis2xiIgcoZpTk0Eyh2ge1DR75ZB+iUFN+3oDmAXgV1intZkG4CchRFspZYqWAyPyZ6UzLz96ZCTMeXkAgGcWfIvg0PKbQBBRYOpepzu61+mO49eP45dLvyAzPxORwZHoVrubz86haY8QAuPGjcMrr7wCwJq56bS8DFtAEwCOnzyLBd+uUm1y+WqKalnpoGarVq2wd+9eTJ06FWvWrMFPP/2E2NhYPPjgg3j11VdtXdorEhoaimXLluGBBx7AZ599hry8PMybNw8GgwEHD1qbRCmzIJ1Vv3597Nq1CwsWLMDXX3+NlStXIiUlxRY4HTp0KD777LMyTY+KzZgxAzfffDNmzZqFzz//HIWFhWjdujX++c9/4sknn1SVqxP5AoY0icgjFN3P2SjIMdoENZXl53y+yiO89YT4MyFEJIA0AMOllKsq276c+9jTqVOnTnv27HHv4HxA8eT9vXv31nQc5H9mPPJ/KMjLBcCgpjP4v0daS0pKAoAqBcD0JiPDWuYdFVW2vNvjrh5Tl5xXJjgSqNnCc+Oxo1OnTjCZTKrO4u62fv16DBo0CM2bN8emTZtsXc9J3zT93/OS/BwzUi9bs66DQo2oHh9hWxfI
r7ukPZ57+rfCjHxcfPMXAIAhIgh1X/X9ShdfsWjRIhw+fBgAcO+995aZp9sTTl3NQu/3NwIAaoUJvHd7uG7/9zp37oy9e/fulVI61ikUftr9XAjxf0KIj4UQW4QQ6UIIKYT4opJ96gsh5gkhLggh8oQQp4QQHwohqlfhkFGw/q6uu+UBEFFZ/IKFiKhyBTmOBTQB6/YFZTuZe0paWhoOHDiAl19+2aPH6devHz755BMkJSXhjjvuwPXrPE0jnWD7cyLyAGFkoyBnaV5+zkZB5fLX8vNXALQHkAngHIDWFW0shGgGYDuAOAArAPwBoCuA5wAMFEL0lFJeq+AuPgKwH8AOl0dORCU4ZxQRkWPyMpzfL8g72fDR0dEoLPTO2fe4ceNgMplw8uRJbN261TZnJhEREZWimFMTFjYKcoQyqGnx0u8ujI2CqsRfg5p/hTWYeRzA7QA2VLL9LFgDms9KKT8uXiiE+KDovt4E8Li9HYu26QWgl5SS8XEiD2GiJhFRFVTQCd4j+/mB0aNHaz0EIiIin8dMTedpPadmfqH3jutv/LL8XEq5QUp5TFbhWRVCNAUwAMApADNLrX4NQBaAUUKIiFLrIIT4D4AHAfSVUp5weeBEpMJETSIiBwknT92c3Y+IvE55fsTPsETkNgbFi0uhZJDMAVoENY0GgRCT9fxNAsjX7/fTLvHXTE1H9C36uU5KdZqClDJDCLEN1qBndwDri9cJIT4C8ACA3lLKP6p6MCFEeZ2AWmdkZNgmV9aT4gnb9fjYyLPM5pLk561btsAYEqLhaPwP//dIa+Hh4QgPD7f9LQaS4vJqbz92Q6EBEbCe3Fble6Hi7bLMBlgC8Hki/dHqf8+bLOaSD8yWwkLVYy0sLER2djbf+0kTPPf0f81ggCg6g9i0YZOfprl5X3Jysu16UlISUlNTvXLcIGFBXtH1a2mZuv3fc+U9PRD+hFsV/TxazvpjRT9bFi8QQswEMBbWLM3rQojaRZdIzw2TKAAxU5OIyCEWYwjMxtAqv3wKAGZjGCxGfmlE5I+YR0VE7iQVESDBF5gqExqVGIYopgzI52SIdgVCpmZ00c+0ctYXL49RLHuy6Od69aaYCmBKRQcrrwW9EGJPVFRUp969e1e0u18q/rZAj4+NPOtg4izkIx8A0LNXT4RG8HsDR/B/j7SWlJQEAIiKitJ4JN5X/I2yJo89uD5w7XiVNzfF1ENUSOA9R6RPmv7veUlBXiHy0rMAAEajEVFRJbNkWW9HoWvXrloNjwIYzz393/mft0MWRcdu7dkLhtBACAm5Li0tDZcuXQIAtGzZEp072w37uF31vZtwLTcTABAUGqHb/z1X3tP5F1ySK2b7nkJKyfwxIi/Q6hsvIiK/FhIFRDcA0s5Wvm10Q+v2ROQ/lKdHnPOOiNyJzYKcosWcmgAQzg7olQqEoGZxJmZ0OeurldqOiLTA12gioqqLqAmYQoCMS0B+Ztn1wZFAVG0GNIn8kCqmqdkoiEiPhFGUvK5Y+ApTVb4Q1Mxl+bldgRDUPFL0s2U561sU/Sxvzk0i8hQmahIROS8kynopyAHyMgBpsXY5D4kCgsK0Hh0RERH5GKHK1GQ77arSLqhZErJjpqZ9gRDU3FD0c4AQwqDsgC6EiALQE0AOgJ1aDI6IrCRzEYiInBMUxiAmkZ4wVZOIPMWo6BTEIFmVaRXUDFOVn3vtsH5F993PpZR/AlgHoDGAp0qtngogAsBCKWWWl4dGFPCE8qydc0YRERERERF5jDBwTk1naJapGcQ5NSvjl5maQojhAIYX3axd9LOHECKx6PpVKeXfFbs8CWA7gBlCiH4AkgB0A9AH1rLzyR4aZxSA4smsgiwWpncTqbBREBEREVEpPD8iIg9RlJ9zTs2q84U5NfPNXjusX/HLoCaADgBGl1rWtOgCAKcB2IKaUso/hRA3A5gGYCCAwQAuApgBYKqUMsVD43wBwGvFN5KTkz10GCL/5803ByLyP3nHjiFrx05YsjJhiIhERI/uCGnRovIdiYj8DJufE5GnMFPTOZoFNUM4p2Zl/LL8XEo5RUopKrg0trPPWSnlWCllHSllsJSykZTyOQ8GNAFgOoB6RZeD8fHxHjwUkR9ipiYRVSJrxw6cfngUTgwdhuS33sKVj2Yg+a23cGLoMJx+eBSyduzQeogArCe7yovRaERsbCx69+6NxMREuyfAp06dKrOfyWRCXFwcBg4ciBUrVtg9VkFBAT766COMHTsWHTp0QHBwMIQQSEhIqHCMJ06cwLhx49CgQQMEBwejdu3aePDBB/HHH3/YPcayZcswbtw43HDDDahWrRrCw8Nx44034l//+hcyMjIc+v2MGTMGQggkJiYCANatWwchBLp161bpvl9++SWEEBg+fLhtWenfW+lL8XEA4Ny5c3jzzTdx7733onnz5jAYDBBC4Pjx45Ue+9SpU3jiiSfQtGlThIaGokaNGujWrRumT59eZtsXX3wR/fr1Q4MGDRAWFobY2Fh07NgRU6dOxbVr1+zef15eHmbOnImuXbuiZs2aiIyMRJs2bfDss8/i9OnTlY4vLy8PN9xwA4QQqF+/fqXbKyUmJpb5vYWEhKBJkyYYM2YMDh8+bHc/s9mMuXPnYsCAAYiLi0NwcDDi4uJw5513IjExEYWF9if9Wr58Oe6//360bt0a1atXR1hYGFq0aIEHH3wQu3fvLrN9VlYWvvzySzz00ENo3bo1IiIiEBUVhdtvvx0ff/wx8vPzHXq8foWnR0TkKWwU5BRfKD9n93P7/DVT0y9IKTMAZACAEKLAYPDLGDIREZEmUhcvxsV/vQaUM31L9u7dODNuPOq8Pg0xI0d6eXT2vfaatUCjoKAAx48fx7Jly7Bp0ybs3r0b//3vf+3uEx0djeeffx6ANUj1+++/47vvvsMPP/yA9957D3//+99V22dlZdm2j4+PR+3atXH27NkKx7V371706dMH6enp6Nu3Lx544AGcPXsWS5YswapVq/DTTz+he/futu3//PNP3HPPPYiIiECfPn0wZMgQZGZm4ocffsDrr7+Ob775Btu2bUPNmjWd+j3dcccdaNKkCXbt2oXffvsNN910U7nbFgdrJ0yYUGZd8e+7tA4dOtiu7969G6+88gqEEGjSpAmio6ORmppa6Rh/+OEH3HPPPTCbzbjrrrtw//33IzMzE0eOHMGyZcvwwgsvqLb/z3/+g06dOuGOO+5AXFwcsrKysHPnTkyZMgWfffYZdu7ciQYNGti2N5vN6NevH7Zt24bWrVvjwQcfREhICH799Vd8/PHHWLhwIbZv3462bduWO8aXX365SsHPirRv394WME5LS8PGjRuxYMECLFq0CD///LPq7+LcuXMYNmwY9u3bh/j4eAwZMgR16tTBpUuXsGbNGqxbtw6zZ8/GihUrUPrL/BUrVuDXX39Fly5dULduXQQHB9v+R7755ht89tlnGD9+vG37LVu24OGHH0ZsbCz69OmD4cOHIyUlBStXrsTkyZPx/fffY/369QgNDXXp8fs+ZuYQkfsINgpyim80CuLzZZe
[... base64-encoded PNG data omitted: matplotlib figure output (QPS vs. inter @ 10 plot), 666 x 440 px ...]
-      "text/plain": [
-       ""
" - ] - }, - "metadata": { - "image/png": { - "height": 440, - "width": 666 - }, - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "allkeys=\"\"\"\n", - "IVF16k IVF16k,SQ8 IVF16k,SQ8_nores \n", - "IVF16k,SQ6 IVF16k,SQ6_nores \n", - "IVF16k,SQ8_PQ32 IVF16k,SQ8_PQ32_nores \n", - "IVF16k,SQ4 IVF16k,SQ4_PCAR100 \n", - "IVF16k,RR192_PQ32 IVF16k,PQ64 IVF16k,PQ48\"\"\"\n", - "\n", - "for key in sorted(allkeys.split()): \n", - " print(key)\n", - " indexkey, res, keys, stats = parse_result_file(find_latest_version(\n", - " f\"../logs/{dsname}.{key}.b.log\"))\n", - " \n", - " if res.size == 0 or \"nores\" in key: \n", - " print(\"skip\", key)\n", - " continue\n", - " \n", - " r10 = res[:, 0]\n", - " qps = 1000 / res[:, 1]\n", - " \n", - " pyplot.semilogy(\n", - " r10, qps, \n", - " 'o-' if \"PQ\" in key else \n", - " \"+-\" if \"SQ\" in key else \"-\", \n", - " label=indexkey or key)\n", - "\n", - "pyplot.title(dsname)\n", - "pyplot.xlabel(\"inter @ 10\")\n", - "pyplot.ylabel(\"QPS\")\n", - "pyplot.legend()\n", - "pyplot.grid()\n", - "pyplot.gcf().set_size_inches(11, 7)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "c4edbd1f", - "metadata": {}, - "outputs": [], - "source": [ - "indexkey" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5e103039", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/find_suitable_nq.ipynb b/notebooks/find_suitable_nq.ipynb deleted file mode 100644 index 80d465dff..000000000 --- a/notebooks/find_suitable_nq.ipynb +++ /dev/null @@ -1,323 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "sealed-vertical", - "metadata": {}, - "source": [ - "The purpose of this notebook is to evaluate how many query vectors (nq) we need \n", - "to get an accurate estimate of the intersection @ 10 measure. 
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 55,
-   "id": "hairy-relaxation",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import faiss\n",
-    "from faiss.contrib import datasets\n",
-    "from matplotlib import pyplot"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 69,
-   "id": "emotional-filter",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# start with BigANN 10M\n",
-    "\n",
-    "ds = datasets.DatasetBigANN(10)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "id": "alpha-webcam",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# we need more query vectors than the usual 10k, so pick them from the training set\n",
-    "\n",
-    "xtt = ds.get_train(maxtrain=2 * 10**6)\n",
-    "\n",
-    "big_xq = xtt[:10**6] # 1M queries \n",
-    "xt = xtt[10**6:] # 1M training vectors"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "imported-variance",
-   "metadata": {},
-   "source": [
-    "# Ground truth"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "id": "adequate-jefferson",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "index = faiss.IndexFlatL2(128)\n",
-    "index.add(ds.get_database())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "id": "minus-defendant",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "index = faiss.index_cpu_to_all_gpus(index, ngpu=1)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 20,
-   "id": "trying-andorra",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "CPU times: user 4min 59s, sys: 2min 8s, total: 7min 8s\n",
-      "Wall time: 7min 8s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "Dgt, Igt = index.search(big_xq, 10)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 21,
-   "id": "maritime-oriental",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "np.save(\"/tmp/Dgt.npy\", Dgt)\n",
-    "np.save(\"/tmp/Igt.npy\", Igt)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "supreme-jewel",
-   "metadata": {},
-   "source": [
-    "# With some index"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 27,
-   "id": "comic-therapy",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# we pick some index that has one search-time parameter \n",
-    "# to represent different speed-accuracy tradeoffs.\n",
-    "\n",
-    "index = faiss.index_factory(128, \"IVF16384,SQ4\")\n",
-    "index = faiss.index_cpu_to_all_gpus(index, ngpu=1)\n",
-    "\n",
-    "index.train(xt)\n",
-    "\n",
-    "index.add(ds.get_database())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 48,
-   "id": "perfect-associate",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "1\n",
-      "4\n",
-      "16\n",
-      "64\n"
-     ]
-    }
-   ],
-   "source": [
-    "# run queries \n",
-    "\n",
-    "res_per_nprobe = {}\n",
-    "for nprobe in 1, 4, 16, 64:\n",
-    "    print(nprobe)\n",
-    "    D, I = [], []\n",
-    "    index.nprobe = nprobe\n",
-    "    # Faiss crashes when searching all at once\n",
-    "    for i0 in range(0, 10**6, 10000): \n",
-    "        Di, Ii = index.search(big_xq[i0 : i0 + 10000], 10)\n",
-    "        D.append(Di)\n",
-    "        I.append(Ii)\n",
-    "    D = np.vstack(D)\n",
-    "    I = np.vstack(I)\n",
-    "    res_per_nprobe[nprobe] = I"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "novel-yukon",
-   "metadata": {},
-   "source": [
-    "# Stats on intersection measure"
-   ]
-  },
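For context on the cells of this deleted notebook: its stated goal is to find out how large nq has to be before the intersection @ 10 estimate is stable. The standalone sketch below (it is not part of the notebook being deleted) illustrates that idea by recomputing the measure on random query subsets of growing size and looking at the spread of the estimates. The synthetic arrays, subset sizes, and repetition count are assumptions made for this example; faiss.eval_intersection is used the same way as in the evaluation cell that follows.

    import numpy as np
    import faiss

    def inter_at_10(I, Igt):
        # faiss.eval_intersection returns the total number of IDs shared between the
        # result lists and the ground-truth lists; dividing by I.size gives the
        # average fraction, i.e. the "inter @ 10" measure used in these notebooks.
        return faiss.eval_intersection(I, Igt) / I.size

    # Synthetic stand-ins for the notebook's I / Igt arrays (top-10 IDs per query).
    rng = np.random.default_rng(0)
    nq_total, k = 100_000, 10
    Igt = rng.integers(0, 10**7, size=(nq_total, k)).astype("int64")
    I = Igt.copy()
    wrong = rng.random((nq_total, k)) < 0.4            # corrupt ~40% of the entries
    I[wrong] = rng.integers(0, 10**7, size=int(wrong.sum()))

    # Re-estimate inter @ 10 on random query subsets of increasing size: the spread
    # of the estimates shrinks as nq grows, which is what the notebook is probing.
    for nq in (100, 1_000, 10_000):
        estimates = [
            inter_at_10(I[sel], Igt[sel])
            for sel in (rng.choice(nq_total, size=nq, replace=False) for _ in range(20))
        ]
        print(f"nq={nq:6d}  mean={np.mean(estimates):.4f}  std={np.std(estimates):.4f}")

With real data, I and Igt would instead come from index.search and the exact ground-truth computation shown above.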
inter 49.756 %\n", - "nprobe 16 inter 68.647 %\n", - "nprobe 64 inter 77.002 %\n" - ] - } - ], - "source": [ - "# evaluate intersection measures for the 1M queries\n", - "\n", - "for nprobe in 1, 4, 16, 64: \n", - " I = res_per_nprobe[nprobe]\n", - " ninter = faiss.eval_intersection(I, Igt)\n", - " print(f\"nprobe {nprobe:-5d} inter {100 * ninter/I.size:.3f} %\")" - ] - }, - { - "cell_type": "code", - "execution_count": 60, - "id": "consistent-service", - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZgAAAEGCAYAAABYV4NmAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAB1OUlEQVR4nO2dZ3hVxdaA35XeEwghhASk14B0FUIRlCIioIgdEBS7YL3y3atiL6gU7xWlIyqIIAjSFYL03pt0CQQCCamkZ74feyckIT3n5CRh3ufZz9ln7T0za3JOztozs2YtUUqh0Wg0Go2lsbO1AhqNRqOpnGgDo9FoNBqroA2MRqPRaKyCNjAajUajsQrawGg0Go3GKjjYWoHyQrVq1VSdOnVKXD4hIQF3d3fLKVQBuNn6fLP1F3SfbxZK0+ddu3ZdUUr55XVNGxiTOnXqsHPnzhKXDw0NpVu3bpZTqAJws/X5Zusv6D7fLJSmzyJyNr9reopMo9FoNFbBagZGRFxEZLuI7BORQyLynikfKyLnRWSvedyTrcwYETkhIsdEpFc2eVsROWBemyQiYsqdReRnU75NROpkKzNURI6bx1Br9VOj0Wg0eWPNKbJkoLtSKl5EHIGNIrLCvDZeKfVF9ptFpBnwMNAcqAn8ISKNlFLpwGRgJLAVWA70BlYAI4CrSqkGIvIw8BnwkIhUBd4F2gEK2CUiS5RSV63YX41Go9Fkw2oGRhkxaOLNt47mUVBcmv7APKVUMnBaRE4AHUTkDOCllNoCICLfAwMwDEx/YKxZfgHwX3N00wtYo5SKMsuswTBKc4vTh9TUVMLCwkhKSir0Xm9vb44cOVKc6is8Je2zi4sLQUFBODo6WkErjUZTXrDqIr+I2AO7gAbA/5RS20SkD/CiiAwBdgKvmSOLQIwRSiZhpizVPM8tx3w9B6CUShORGMA3uzyPMkUmLCwMT09P6tSpgzkrly9xcXF4enoWt4kKTUn6rJQiMjKSsLAw6tatayXNNBpNecCqBsac3molIj7AIhEJxpju+gBjNPMB8CUwHMjrF1wVIKeEZbIQkZEYU2/4+/sTGhqa47q3tze+vr7Ex8fnLnoD6enpxMXFFXpfZaKkfXZyciI6OvqGv3d5Jz4+vsLpXFpupj67bNuOx2+/UT0qioNVqxLfvz9Jt3WwtVplgrU+5zJxU1ZKRYtIKNA7+9qLiEwFfjffhgG1shULAi6Y8qA85NnLhImIA+ANRJnybrnKhOah1xRgCkC7du1Ubje9I0eO4OXlVaQ+6hFM8XBxcaF169YW1si6aPfVykvM0qWEz52LMqfD7aOi8Jk7l4BmTfHu18/G2lkfa33O1vQi8zNHLoiIK3AXcFREArLdNhA4aJ4vAR42PcPqAg2B7UqpcCBORG4311eGAL9lK5PpITYIWGuu/awCeopIFRGpAvQ0ZRqNRnMDEeMnZBmXTFRSEhHjJ9hGoUqCNffBBADrRGQ/sANj0f134HPT5Xg/cCfwCoBS6hAwHzgMrAReMKfYAJ4DpgEngJMYC/wA0wFf0yHgVeAts64ojOm3HebxfuaCv8Zg2LBhLFiwoMTlhw8fTr169QgODragVhqNbUgLDy+WXFM0rOlFth+4YQ5EKfVEAWU+Aj7KQ74TuOGXTCmVBDyYT10zgBnFULnULN5znnGrjnEhOpGaPq680asxA1oX27fAYqSlpeHgYJ2PeNiwYTz55JM899xzVqlfoylLHAICSLtwIU+5puTonfwWYtnBS4z59QDnoxNRwPnoRMb8eoDFe86XuM4zZ87QtGlTnn76aZo3b07Pnj1JTEwEoFu3bowePZqOHTsSHBzM9u3bARg7diwjR46kZ8+eDBkyhLNnz9KjRw9atmxJjx49+Oeff7Lq/+OPP+jcuTONGjXi99+NpbD09HTeeOMN2rdvT8uWLfnuu+/y1K1Lly5UqVKlxH3TaMoTfi+/DLk8RcXFheqvjLaNQpUEHYusiLy39BCHL8Tme33PP1dJSc/pqJaYms6bC/Yzd/s/eZZpVtOLd/s1L7Dd48ePM3fuXKZOncrgwYNZuHAhjz/+OGAEqNu8eTN//fUXw4cP5+BBYzlr165dbNy4EVdXV/r168eQIUMYOnQoM2bM4OWXX2bx4sWAYcDWr1/PyZMnufPOOzlx4gTff/893t7e7Nixg+TkZDp16kTPnj21S7GmcpOWCkphX6UKaVevIoBH9+43xQK/NdEjGAuR27hcl2eUqt66devSqlUrANq2bcuZM2eyrj3yyCOAMZqIjY0lOjoagPvuuw9XV1cAtmzZwqOPPgrAE088wcaNG7PKDx48GDs7Oxo2bEi9evU4evQoq1ev5vvvv6dVq1bcdtttREZGcvz48VL1QaMpz2SkpHD5m29wadmShps3EfHtZNw73kHizp2olBRbq1eh0SOYIlLYSOOOj/8gPDb5Bnmgjys/P3NHidt1dnbOOre3t8+aIgNu2PyZ+b6gsNvZy+RVXinF119/Ta9evXIX1WgqJdE/zyftQjgBH3yQ9T9RdfgIzj31FDG/L8Pn/oE21rDiokcwFmLUnXVwdbTPIXN1tOeNXo2t1ubPP/8MwMaNG/H29sbb2/uGezp27Mi8efMA+PHHHwkJCcm69ssvv5CRkcHJkyc5deoUjRs3plevXkyePJnU1FQA/v77bxISEqzWB43GlmRcu8aV777DrUMH3Dt2zJK7d+qIc+PGRM2cgbHzQVMS9AjGQvQN9sfFxbVMvciqVKlCx44diY2NZcaMvB3mJk2axPDhwxk3bhx+fn7MnDkz61rjxo3p2rUrly5d4ttvv8XFxYWnnnqKM2fO0KZNG5RS+Pn5Za3ZZOeRRx5h3bp1REZGEhQUxHvvvceIESOs1VWNxipE/fgj6Veu4Ddp0g2je9/hT3LhX2+RsGEDHl262FDLCoxSSh9K0bZtW5Wbw4cP3yDLj9jY2CLfawm6du2qduzYUaZt5qY0fS7O37a8sG7dOlurUOZU5j6nxcaqox1uU2dHjswhz+xzRnKy
[base64-encoded PNG output elided: the figure rendered by the plotting cell below, i.e. a per-query intersection histogram (x axis: nb intersections, y axis: nb queries) with one curve per nprobe in 1, 4, 16, 64]\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "# collect per-query intersections\n", - "\n", - "all_ninters = {}\n", - "for nprobe in 1, 4, 16, 64: \n", - " I = res_per_nprobe[nprobe] \n", - " ninters = [\n", - " faiss.eval_intersection(I[i0 : i0 + 1], Igt[i0 : i0 + 1])\n", - " for i0 in range(10**6)\n", - " ]\n", - " all_ninters[nprobe] = ninters\n", - " pyplot.plot(np.bincount(ninters), 'o-', label=f\"nprobe {nprobe}\")\n", - "pyplot.xlabel(\"nb intersections\")\n", - "pyplot.ylabel(\"nb queries\")\n", - "pyplot.legend()\n", - "pyplot.grid() \n" - ] - }, - { - "cell_type": "code", - "execution_count": 68, - "id": "unavailable-forward", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nprobe 1\n", - "n= 1000 stddev 0.705 %\n", - "n= 10000 stddev 0.223 %\n", - "n= 100000 stddev 0.071 %\n", - "n=1000000 stddev 0.022 %\n", - "nprobe 4\n", - "n= 1000 stddev 0.727 %\n", - "n= 10000 stddev 0.230 %\n", - "n= 100000 stddev 0.073 %\n", - "n=1000000 stddev 0.023 %\n", - "nprobe 16\n", - "n= 1000 stddev 0.525 %\n", - "n= 10000 stddev 0.166 %\n", - "n= 100000 stddev 0.053 %\n", - "n=1000000 stddev 0.017 %\n", - "nprobe 64\n", - "n= 1000 stddev 0.382 %\n", - "n= 10000 stddev 0.121 %\n", - "n= 100000 stddev 0.038 %\n", - "n=1000000 stddev 0.012 %\n" - ] - } - ], - "source": [ - "# do some math to compute standard deviations\n", - "\n", - "for nprobe in 1, 4, 16, 64: \n", - " intersection_measures = np.array(all_ninters[nprobe]) / 10 \n", - " variance = intersection_measures.var()\n", - " print(\"nprobe\", nprobe)\n", - " for n in 10**3, 10**4, 10**5, 10**6: \n", - " # sum of independent variables\n", - " # https://en.wikipedia.org/wiki/Variance#Sum_of_uncorrelated_variables_.28Bienaym.C3.A9_formula.29\n", - " variance_of_sum = n * variance\n", - " variance_of_mean = variance_of_sum * (1 / n) ** 2\n", - " sttdev_of_mean = np.sqrt(variance_of_mean)\n", - " print(f\"n={n:-7} stddev {100*sttdev_of_mean:.3f} % (percentage points)\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "realistic-guinea", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/notebooks/large_coarse_quantizer.ipynb b/notebooks/large_coarse_quantizer.ipynb deleted file mode 100644 index ecccd1401..000000000 --- a/notebooks/large_coarse_quantizer.ipynb +++ /dev/null @@ -1,586 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "9f85f826", - "metadata": {}, - "source": [ - "It is useful to cluster the 1B datasets to around 262k - 1M clusters for IVF indexing with Faiss.\n", - "However, it is not feasible to do the clustering within the allocated time for indexing. 
\n", - "\n", - "Therefore, here we evaluate other options to break down the clustering cost, while getting the same number of clusters.\n", - "The model that we use is: Deep1M (1M database vectors), 4096 clusters (which conveniently breaks down to 2^6 * 2^6)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "ongoing-first", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import faiss\n", - "from faiss.contrib import datasets" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "finnish-giant", - "metadata": {}, - "outputs": [], - "source": [ - "ds = datasets.DatasetDeep1B(10**6)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "satisfied-adoption", - "metadata": {}, - "outputs": [], - "source": [ - "xt = ds.get_train(10**5)\n", - "d = ds.d\n", - "xb = ds.get_database()\n", - "xq = ds.get_queries()\n", - "gt = ds.get_groundtruth()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "genetic-sleep", - "metadata": {}, - "outputs": [], - "source": [ - "sqrt_nlist = 64\n", - "nlist = sqrt_nlist**2" - ] - }, - { - "cell_type": "markdown", - "id": "indoor-client", - "metadata": {}, - "source": [ - "# Flat quantizer" - ] - }, - { - "cell_type": "markdown", - "id": "1a18ca10", - "metadata": {}, - "source": [ - "Flat quantizer is what we would like to apprach, but it probably too costly. \n", - "We include it here as a topline.\n", - "The measure we use is recall of nearest neighbor vs. number of computed distances." - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "romance-pacific", - "metadata": {}, - "outputs": [], - "source": [ - "quantizer = faiss.IndexFlatL2(d)\n", - "index = faiss.IndexIVFFlat(quantizer, d, nlist)" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "noble-possession", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "1.431187283968" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "index.train(xt)\n", - "index.add(xb)\n", - "index.invlists.imbalance_factor()" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "described-chicago", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nprobe=1 1-recall @ 1: 0.3745 dis/q=349.15\n", - "nprobe=4 1-recall @ 1: 0.6849 dis/q=1344.67\n", - "nprobe=16 1-recall @ 1: 0.9004 dis/q=5040.35\n", - "nprobe=64 1-recall @ 1: 0.9793 dis/q=18331.49\n" - ] - } - ], - "source": [ - "stats = faiss.cvar.indexIVF_stats\n", - "for nprobe in 1, 4, 16, 64: \n", - " index.nprobe = nprobe \n", - " stats.reset()\n", - " D, I = index.search(xq, 100)\n", - " rank = 1\n", - " recall = (I[:, :rank] == gt[:, :1]).sum() / len(xq)\n", - " print(f\"nprobe={nprobe} 1-recall @ {rank}: {recall} dis/q={stats.ndis/len(xq):.2f}\")" - ] - }, - { - "cell_type": "markdown", - "id": "realistic-valve", - "metadata": {}, - "source": [ - "# IMI quantizer" - ] - }, - { - "cell_type": "markdown", - "id": "c0388876", - "metadata": {}, - "source": [ - "The IMI quantizer is a cheap way of breaking down the dataset into buckets. It is a PQ2x6 and each PQ code ends in a separate bucket. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "amateur-earth", - "metadata": {}, - "outputs": [], - "source": [ - "quantizer = faiss.MultiIndexQuantizer(d, 2, int(np.log2(sqrt_nlist)))" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "unsigned-motorcycle", - "metadata": {}, - "outputs": [], - "source": [ - "index = faiss.IndexIVFFlat(quantizer, d, nlist)\n", - "index.quantizer_trains_alone = 1" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "organizational-impossible", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "16.421237645312" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "index.train(xt)\n", - "index.add(xb)\n", - "index.invlists.imbalance_factor()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "7be36ece", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nprobe=1 1-recall @ 1: 0.437 dis/q=3972.32\n", - "nprobe=4 1-recall @ 1: 0.6948 dis/q=9210.20\n", - "nprobe=16 1-recall @ 1: 0.8656 dis/q=19246.74\n", - "nprobe=64 1-recall @ 1: 0.9613 dis/q=41114.89\n" - ] - } - ], - "source": [ - "stats = faiss.cvar.indexIVF_stats\n", - "\n", - "for nprobe in 1, 4, 16, 64: \n", - " index.nprobe = nprobe \n", - " stats.reset()\n", - "\n", - " D, I = index.search(xq, 100)\n", - " rank = 1\n", - " recall = (I[:, :rank] == gt[:, :1]).sum() / len(xq)\n", - " print(f\"nprobe={nprobe} 1-recall @ {rank}: {recall} dis/q={stats.ndis/len(xq):.2f}\")" - ] - }, - { - "cell_type": "markdown", - "id": "cc986a53", - "metadata": {}, - "source": [ - "So way less efficient than the flat quantizer, due to imbalanced inverted lists. TBH, the IMI quantizer usually sets a cap on the number of distances rather than fixing the number of visited buckets. " - ] - }, - { - "cell_type": "markdown", - "id": "south-differential", - "metadata": {}, - "source": [ - "# Residual quantizer" - ] - }, - { - "cell_type": "markdown", - "id": "9e5910d8", - "metadata": {}, - "source": [ - "This is a 2-level additive quantizer where the first level is trained first, then the second. Since it is an additive quantizer, the top-k centroids can be retrieved efficiently with lookup tables. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "elect-vacation", - "metadata": {}, - "outputs": [], - "source": [ - "quantizer = faiss.ResidualCoarseQuantizer(d, 2, int(np.log2(sqrt_nlist)))" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "id": "nervous-lesbian", - "metadata": {}, - "outputs": [], - "source": [ - "index = faiss.IndexIVFFlat(quantizer, d, nlist)\n", - "index.quantizer_trains_alone = 1" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "id": "ae530558", - "metadata": {}, - "outputs": [], - "source": [ - "index.train(xt)" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "id": "ceaa6077", - "metadata": {}, - "outputs": [], - "source": [ - "quantizer.set_beam_factor(-1)" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "id": "3eb25d40", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "3.604173447168" - ] - }, - "execution_count": 41, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "index.add(xb)\n", - "index.invlists.imbalance_factor()" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "id": "af3a02de", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nprobe=1 1-recall @ 1: 0.3079 dis/q=878.77\n", - "nprobe=4 1-recall @ 1: 0.6091 dis/q=3017.90\n", - "nprobe=16 1-recall @ 1: 0.8608 dis/q=9996.18\n", - "nprobe=64 1-recall @ 1: 0.9685 dis/q=31318.18\n" - ] - } - ], - "source": [ - "stats = faiss.cvar.indexIVF_stats\n", - "\n", - "for nprobe in 1, 4, 16, 64: \n", - " index.nprobe = nprobe \n", - " stats.reset()\n", - "\n", - " D, I = index.search(xq, 100)\n", - " rank = 1\n", - " recall = (I[:, :rank] == gt[:, :1]).sum() / len(xq)\n", - " print(f\"nprobe={nprobe} 1-recall @ {rank}: {recall} dis/q={stats.ndis/len(xq):.2f}\")" - ] - }, - { - "cell_type": "markdown", - "id": "b9db020c", - "metadata": {}, - "source": [ - "Unfortunately still not very good. " - ] - }, - { - "cell_type": "markdown", - "id": "9a0514ef", - "metadata": {}, - "source": [ - "# 2-level tree quantizer" - ] - }, - { - "cell_type": "markdown", - "id": "adfc3b23", - "metadata": {}, - "source": [ - "This is a suggestion by Harsha: just cluster to 64 centroids at the first level and train separate clusterings within each bucket." 
- ] - }, - { - "cell_type": "code", - "execution_count": 47, - "id": "4f86ff7a", - "metadata": {}, - "outputs": [], - "source": [ - "# 1st level quantizer " - ] - }, - { - "cell_type": "code", - "execution_count": 48, - "id": "8157ef04", - "metadata": {}, - "outputs": [], - "source": [ - "km = faiss.Kmeans(d, sqrt_nlist)" - ] - }, - { - "cell_type": "code", - "execution_count": 49, - "id": "29b154ce", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "9879.4462890625" - ] - }, - "execution_count": 49, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "km.train(xt)" - ] - }, - { - "cell_type": "code", - "execution_count": 50, - "id": "27a355a7", - "metadata": {}, - "outputs": [], - "source": [ - "centroids1 = km.centroids" - ] - }, - { - "cell_type": "code", - "execution_count": 62, - "id": "6083d36f", - "metadata": {}, - "outputs": [], - "source": [ - "xt2 = ds.get_train(500_000)\n", - "\n", - "_, assign1 = km.assign(xt2)\n", - "bc = np.bincount(assign1)\n", - "o = assign1.argsort()" - ] - }, - { - "cell_type": "code", - "execution_count": 64, - "id": "32e64dfb", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "63\r" - ] - } - ], - "source": [ - "i0 = 0\n", - "c2 = []\n", - "for c1 in range(sqrt_nlist): \n", - " print(c1, end=\"\\r\", flush=True)\n", - " i1 = i0 + bc[c1]\n", - " subset = o[i0:i1]\n", - " assert np.all(assign1[subset] == c1)\n", - " km = faiss.Kmeans(d, sqrt_nlist)\n", - " xtsub = xt2[subset]\n", - " km.train(xtsub)\n", - " c2.append(km.centroids)\n", - " i0 = i1" - ] - }, - { - "cell_type": "markdown", - "id": "704c495a", - "metadata": {}, - "source": [ - "Then we just stack the centroids together and forget about the first level clustering. \n", - "In reality with 262k-1M clusters, we'll train a HNSW or NSG index on top. " - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "id": "b41aeeae", - "metadata": {}, - "outputs": [], - "source": [ - "centroids12 = np.vstack(c2)" - ] - }, - { - "cell_type": "code", - "execution_count": 66, - "id": "7041f966", - "metadata": {}, - "outputs": [], - "source": [ - "quantizer = faiss.IndexFlatL2(d)\n", - "quantizer.add(centroids12)\n", - "index = faiss.IndexIVFFlat(quantizer, d, nlist)" - ] - }, - { - "cell_type": "code", - "execution_count": 68, - "id": "1bf4175d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "1.200742457344" - ] - }, - "execution_count": 68, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "index.add(xb)\n", - "index.invlists.imbalance_factor()" - ] - }, - { - "cell_type": "code", - "execution_count": 69, - "id": "6d2acf15", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nprobe=1 1-recall @ 1: 0.3774 dis/q=291.20\n", - "nprobe=4 1-recall @ 1: 0.6847 dis/q=1153.03\n", - "nprobe=16 1-recall @ 1: 0.8995 dis/q=4459.66\n", - "nprobe=64 1-recall @ 1: 0.9825 dis/q=16942.70\n" - ] - } - ], - "source": [ - "stats = faiss.cvar.indexIVF_stats\n", - "for nprobe in 1, 4, 16, 64: \n", - " index.nprobe = nprobe \n", - " stats.reset()\n", - " D, I = index.search(xq, 100)\n", - " rank = 1\n", - " recall = (I[:, :rank] == gt[:, :1]).sum() / len(xq)\n", - " print(f\"nprobe={nprobe} 1-recall @ {rank}: {recall} dis/q={stats.ndis/len(xq):.2f}\")" - ] - }, - { - "cell_type": "markdown", - "id": "35c0a565", - "metadata": {}, - "source": [ - "Turns out this is very good: same level of accuracy as the flat topline!" 
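A sketch of how this would plug into the billion-scale setting mentioned at the top of the notebook (not executed here; it reuses centroids12, d, nlist and xb from the cells above, and the HNSW parameters are illustrative assumptions): with 262k-1M stacked centroids, the coarse quantizer itself would typically be an HNSW index rather than IndexFlatL2 so that assignment stays cheap.

# Hypothetical scale-up: index the stacked centroids with HNSW.
quantizer = faiss.IndexHNSWFlat(d, 32)       # 32 graph neighbors (illustrative)
quantizer.hnsw.efSearch = 64                 # illustrative search width
quantizer.add(centroids12)
index = faiss.IndexIVFFlat(quantizer, d, nlist)
index.add(xb)   # same pattern as above: quantizer is pre-filled, no train() call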
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1b4f1c3a", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/plot.py b/plot.py deleted file mode 100644 index 941b78cab..000000000 --- a/plot.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import matplotlib as mpl -mpl.use('Agg') # noqa -import matplotlib.pyplot as plt -import numpy as np -import argparse - -from benchmark.datasets import DATASETS -from benchmark.algorithms.definitions import get_definitions -from benchmark.plotting.metrics import all_metrics as metrics -from benchmark.plotting.utils import (get_plot_label, compute_metrics, - create_linestyles, create_pointset) -from benchmark.results import (store_results, load_all_results, - get_unique_algorithms) - - -def create_plot(all_data, raw, x_scale, y_scale, xn, yn, fn_out, linestyles): - xm, ym = (metrics[xn], metrics[yn]) - # Now generate each plot - handles = [] - labels = [] - plt.figure(figsize=(12, 9)) - - # Sorting by mean y-value helps aligning plots with labels - def mean_y(algo): - xs, ys, ls, axs, ays, als = create_pointset(all_data[algo], xn, yn) - return -np.log(np.array(ys)).mean() - # Find range for logit x-scale - min_x, max_x = 1, 0 - for algo in sorted(all_data.keys(), key=mean_y): - xs, ys, ls, axs, ays, als = create_pointset(all_data[algo], xn, yn) - min_x = min([min_x]+[x for x in xs if x > 0]) - max_x = max([max_x]+[x for x in xs if x < 1]) - color, faded, linestyle, marker = linestyles[algo] - handle, = plt.plot(xs, ys, '-', label=algo, color=color, - ms=7, mew=3, lw=3, linestyle=linestyle, - marker=marker) - handles.append(handle) - if raw: - handle2, = plt.plot(axs, ays, '-', label=algo, color=faded, - ms=5, mew=2, lw=2, linestyle=linestyle, - marker=marker) - labels.append(algo) - - ax = plt.gca() - ax.set_ylabel(ym['description']) - ax.set_xlabel(xm['description']) - # Custom scales of the type --x-scale a3 - if x_scale[0] == 'a': - alpha = int(x_scale[1:]) - fun = lambda x: 1-(1-x)**(1/alpha) - inv_fun = lambda x: 1-(1-x)**alpha - ax.set_xscale('function', functions=(fun, inv_fun)) - if alpha <= 3: - ticks = [inv_fun(x) for x in np.arange(0,1.2,.2)] - plt.xticks(ticks) - if alpha > 3: - from matplotlib import ticker - ax.xaxis.set_major_formatter(ticker.LogitFormatter()) - #plt.xticks(ticker.LogitLocator().tick_values(min_x, max_x)) - plt.xticks([0, 1/2, 1-1e-1, 1-1e-2, 1-1e-3, 1-1e-4, 1]) - # Other x-scales - else: - ax.set_xscale(x_scale) - ax.set_yscale(y_scale) - ax.set_title(get_plot_label(xm, ym)) - box = plt.gca().get_position() - # plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height]) - ax.legend(handles, labels, loc='center left', - bbox_to_anchor=(1, 0.5), prop={'size': 9}) - plt.grid(b=True, which='major', color='0.65', linestyle='-') - plt.setp(ax.get_xminorticklabels(), visible=True) - - # Logit scale has to be a subset of (0,1) - if 'lim' in xm and x_scale != 'logit': - x0, x1 = xm['lim'] - plt.xlim(max(x0,0), min(x1,1)) - elif x_scale == 'logit': - plt.xlim(min_x, max_x) - if 'lim' in ym: - plt.ylim(ym['lim']) - - # Workaround for bug 
https://github.com/matplotlib/matplotlib/issues/6789 - ax.spines['bottom']._adjust_location() - - plt.savefig(fn_out, bbox_inches='tight') - plt.close() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--dataset', - metavar="DATASET", - default='sift-1M') - parser.add_argument( - '--count', - default=-1, - type=int) - parser.add_argument( - '--definitions', - metavar='FILE', - help='load algorithm definitions from FILE', - default='algos.yaml') - parser.add_argument( - '--limit', - default=-1) - parser.add_argument( - '-o', '--output') - parser.add_argument( - '-x', '--x-axis', - help='Which metric to use on the X-axis', - choices=metrics.keys(), - default="k-nn") - parser.add_argument( - '-y', '--y-axis', - help='Which metric to use on the Y-axis', - choices=metrics.keys(), - default="qps") - parser.add_argument( - '-X', '--x-scale', - help='Scale to use when drawing the X-axis. Typically linear, logit or a2', - default='linear') - parser.add_argument( - '-Y', '--y-scale', - help='Scale to use when drawing the Y-axis', - choices=["linear", "log", "symlog", "logit"], - default='linear') - parser.add_argument( - '--raw', - help='Show raw results (not just Pareto frontier) in faded colours', - action='store_true') - parser.add_argument( - '--recompute', - help='Clears the cache and recomputes the metrics', - action='store_true') - args = parser.parse_args() - - if not args.output: - args.output = 'results/%s.png' % (args.dataset) - print('writing output to %s' % args.output) - - dataset = DATASETS[args.dataset]() - - if args.count == -1: - args.count = dataset.default_count() - if args.x_axis == "k-nn" and dataset.search_type() == "range": - args.x_axis = "ap" - count = int(args.count) - unique_algorithms = get_unique_algorithms() - results = load_all_results(args.dataset, count) - linestyles = create_linestyles(sorted(unique_algorithms)) - runs = compute_metrics(dataset.get_groundtruth(k=args.count), - results, args.x_axis, args.y_axis, args.recompute) - if not runs: - raise Exception('Nothing to plot') - - create_plot(runs, args.raw, args.x_scale, - args.y_scale, args.x_axis, args.y_axis, args.output, - linestyles) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 62ffd69d7..000000000 --- a/requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -ansicolors==1.1.8 -docker==2.6.1 -h5py==2.10.0 -matplotlib==2.1.0 -numpy==1.16.0 -pyyaml==5.4 -psutil==5.6.6 -scipy==1.0.0 -scikit-learn==0.19.1 -jinja2==2.11.3 -pandas diff --git a/requirements_py38.txt b/requirements_py38.txt deleted file mode 100644 index 503d4cf7f..000000000 --- a/requirements_py38.txt +++ /dev/null @@ -1,11 +0,0 @@ -ansicolors==1.1.8 -docker==2.6.1 -h5py==2.10.0 -matplotlib==3.3.4 -numpy==1.19.5 -pyyaml==5.4 -psutil==5.8.0 -scipy==1.5.4 -scikit-learn -jinja2==2.11.3 -pandas==1.1.5 diff --git a/run.py b/run.py deleted file mode 100644 index 043943b25..000000000 --- a/run.py +++ /dev/null @@ -1,6 +0,0 @@ -from benchmark.main import main -from multiprocessing import freeze_support - -if __name__ == "__main__": - freeze_support() - main() diff --git a/run_algorithm.py b/run_algorithm.py deleted file mode 100644 index cdf383ddd..000000000 --- a/run_algorithm.py +++ /dev/null @@ -1,3 +0,0 @@ -from benchmark.runner import run_from_cmdline - -run_from_cmdline() diff --git a/style.css b/style.css new file mode 100644 index 000000000..b4243362a --- /dev/null +++ b/style.css @@ -0,0 +1,572 @@ +:root { + --color-navy: #01004b; + --color-blue: #1c17ff; + 
--color-light-gray: #f1f5f8; + --color-gray: #525252; + + --spacing-sm: 1.5625rem; /* 25px */ + --spacing-md: 3.125rem; /* 50px */ + --spacing-lg: 4.6875rem; /* 75px */ +} + +@font-face { + font-family: "MediumLLWeb"; + src: url("/fonts/MediumLLWeb-Regular.woff2") format("woff2"); + font-weight: 400; + font-style: normal; + font-display: swap; +} + +@font-face { + font-family: "MediumLLWeb"; + src: url("/fonts/MediumLLWeb-SemiBold.woff2") format("woff2"); + font-weight: 600; + font-style: normal; + font-display: swap; +} + +*, +*:before, +*:after { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +html { + scroll-behavior: smooth; +} + +body { + font-family: "MediumLLWeb", sans-serif; +} + +h1 { + font-size: 2.75rem; + line-height: 3.3rem; +} + +h2 { + font-size: 1.875rem; + line-height: 2.25rem; +} + +h3 { + font-size: 1.25rem; + line-height: 1.75rem; +} + +h4 { + font-size: 1.125rem; + line-height: 1.5rem; +} + +/* Text */ +p, +li { + font-size: 1rem; + line-height: 1.375rem; +} + +a { + text-decoration: none; + color: var(--color-blue); + transition: color 0.3s ease-in-out; +} + +a:hover { + color: var(--color-navy); +} + +.container { + width: 100%; + max-width: 77rem; + margin: 0 auto; + padding-left: 1.5rem; + padding-right: 1.5rem; +} + +/* Header/footer */ +.header { + background-color: #fff; + position: sticky; + top: 0; +} + +.footer { + background-color: var(--color-navy); +} + +.header a, +.footer a { + text-decoration: none; + font-size: 1rem; +} + +.header a { + color: #000; +} + +.footer a { + color: #fff; +} + +.header > .container, +.footer > .container { + display: flex; + justify-content: space-between; + align-items: center; + padding-top: var(--spacing-sm); + padding-bottom: var(--spacing-sm); + gap: var(--spacing-sm); + /* max-width: 56rem; */ +} + +.footer > .container { + flex-direction: column; + align-items: center; +} + +/* Header/footer Title */ +.header .title, +.footer .title { + text-decoration: none; + font-weight: 600; + white-space: nowrap; +} + +/* Nav */ +.header nav, +.header .external-links { + display: none; +} + +.header nav > ul, +.footer nav > ul { + display: flex; + flex-wrap: wrap; + list-style: none; + justify-content: center; + row-gap: 1rem; + column-gap: 1.875rem; +} + +/* External / logo links */ +.external-links { + display: flex; + gap: 1.875rem; +} + +.external-links img { + width: 1.875rem; + height: 1.875rem; + object-fit: contain; +} + +.content-section { + padding-top: var(--spacing-md); + padding-bottom: var(--spacing-md); +} + +/* Hero */ +.hero { + background-color: var(--color-light-gray); +} + +.hero > .container { + text-align: center; + display: flex; + flex-direction: column; + gap: var(--spacing-md); +} + +.hero-eyebrow { + font-weight: 600; + color: var(--color-navy); +} + +.hero h1 { + margin-top: var(--spacing-sm); + color: var(--color-navy); +} + +.hero h1 > span { + display: block; + color: var(--color-blue); +} + +.hero .buttons { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 1.25rem; +} + +.button { + width: 100%; + padding: 0.75rem 1.875rem; + font-size: 1.125rem; + font-weight: 600; + text-decoration: none; + background-color: var(--color-blue); + border: 2px solid var(--color-blue); + border-radius: 5px; + color: #fff; + transition: all 0.3s ease-in-out; + text-align: center; +} + +.button.button-white { + background-color: #fff; + color: var(--color-blue); + border: 2px solid var(--color-blue); +} + +.button.button-alpha:hover { + color: #fff; + background-color: #030080; + 
border-color: #030080; +} + +.button.button-white:hover { + background-color: var(--color-light-gray); +} + +.presented-by { + font-size: 1.25rem; + line-height: 1.75rem; + text-align: center; + display: flex; + flex-wrap: wrap; + justify-content: center; + align-items: baseline; + gap: 0.9375rem; +} + +.presented-by .microsoft-logo, +.presented-by .pinecone-logo { + width: 7rem; + height: auto; +} + +.presented-by .aws-logo { + width: 2.5rem; + height: auto; +} + +.presented-by .zilliz-logo { + width: 6rem; + height: auto; +} + +.hero .hero-text { + color: var(--color-gray); +} + +/* Results section */ +.results { + background-color: #fff; +} + +.results .buttons { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 1.25 rem; + margin-top: var(--spacing-md); +} + +.results .title { + text-align: center; + color: var(--color-navy); +} + +.results h3 { + margin-top: var(--spacing-sm); + text-align: center; +} + +.results h4 { + margin-top: var(--spacing-md); +} + +.result-list { + display: flex; + flex-direction: column; + gap: 1rem; + margin: var(--spacing-sm) 0 var(--spacing-sm); + list-style: disc outside; + padding-left: 1.4rem; + color: var(--color-gray); +} + +.result-list li > span:not(.authors), +.result-list li::marker { + color: #000; + font-weight: 600; +} + +.result-list li > span.authors { + display: block; + margin-top: 0.5rem; + font-size: 1rem; +} + +.result-list li > span.authors > span { + color: #000; + font-weight: 600; +} + +/* Tracks section */ +.tracks { + background-color: var(--color-light-gray); +} + +.tracks .title { + text-align: center; +} + +.tracks .title > h2 { + color: var(--color-navy); + margin-bottom: var(--spacing-sm); +} + +.tracks .title > p { + color: var(--color-gray); +} + +.tracks-list { + display: flex; + flex-direction: column; + gap: 1.75rem; + margin: var(--spacing-md) 0; + list-style: disc outside; + padding-left: 1.4rem; + color: var(--color-gray); +} + +.tracks-list li > span, +.tracks-list li::marker { + color: #000; + font-weight: 600; +} + +.table-notes { + display: block; + text-align: center; + margin-top: var(--spacing-sm); + font-size: 1rem; +} + +.table-notes > a { + text-decoration: none; + color: var(--color-blue); +} + +/* Table */ +.table-container { + text-align: center; + overflow-x: auto; +} + +.group-border { + border-right: 3px solid #738fab; +} + +table { + border-collapse: collapse; + font-size: 1rem; + margin: 0 auto; +} + +th, +td { + border: 2px solid #738fab; + padding: 0.75rem; +} + +th { + background-color: #25239d; + color: #fff; +} + +/* Participation */ +.participate { + background-color: #fff; +} + +.participate h2 { + text-align: center; + color: var(--color-navy); +} + +.participate-section { + margin-top: var(--spacing-md); +} + +.participate-section > ul { + padding-left: 1.4rem; + margin-top: var(--spacing-sm); + display: flex; + flex-direction: column; + gap: var(--spacing-sm); +} + +.participate-section > ul > li span { + font-weight: 600; +} + +/* Contributor section */ + +.organizers { + background-color: var(--color-light-gray); +} + +.organizers > .container { + display: flex; + flex-direction: column; + align-items: center; + gap: var(--spacing-md); +} + +.organizers .title { + text-align: center; +} + +.organizers .title > h2 { + color: var(--color-navy); +} + +.organizers .title > p { + color: var(--color-gray); + margin: var(--spacing-sm) auto 0; + max-width: 54rem; +} + +.organizer-grid { + display: flex; + flex-wrap: wrap; + justify-content: center; + row-gap: var(--spacing-sm); 
+ column-gap: var(--spacing-md); + max-width: 59.375rem; +} + +.organizer { + width: 10.2rem; +} + +.organizer > img { + width: 9.8125rem; + height: 9.8125rem; + object-fit: cover; + border-radius: 5px; +} + +.organizer .name { + display: block; + font-size: 1.25rem; + font-weight: 600; + color: #0a2540; +} + +.organizer .role { + display: block; + font-size: 1rem; + color: var(--color-gray); +} + +.organizer .image-border { + border: 1px solid var(--color-navy); +} + +.organizers .presented-by { + color: var(--color-navy); +} + +@media only screen and (min-width: 768px) { + .container { + padding: 0 2.5rem; + } + + .content-section { + padding-top: var(--spacing-lg); + padding-bottom: var(--spacing-lg); + } + + p, + li { + font-size: 1.25rem; + line-height: 1.75rem; + } + + h1 { + font-size: 3.75rem; + line-height: 4.125rem; + } + + h2 { + font-size: 2.75rem; + line-height: 3.3rem; + } + + h3 { + font-size: 1.875rem; + line-height: 2.25rem; + } + + h4 { + font-size: 1.5rem; + } + + .button { + width: auto; + } + + .header nav { + display: block; + } + + .tracks-list { + margin: var(--spacing-lg) 0; + } + + .participate-section { + margin-top: var(--spacing-lg); + } + + .organizers > .container { + gap: var(--spacing-lg); + } + + .organizer-grid { + row-gap: var(--spacing-md); + column-gap: var(--spacing-lg); + } + + .presented-by { + font-size: 1.875rem; + line-height: 2.25rem; + } + + .presented-by .microsoft-logo, + .presented-by .pinecone-logo { + width: 10rem; + height: auto; + } + + .presented-by .aws-logo { + width: 3.5rem; + height: auto; + } +} + +@media only screen and (min-width: 1024px) { + .header .external-links { + display: flex; + } + + .footer > .container { + flex-direction: row; + } +} diff --git a/t1_t2/README.md b/t1_t2/README.md deleted file mode 100644 index 22cec24be..000000000 --- a/t1_t2/README.md +++ /dev/null @@ -1,296 +0,0 @@ -# T1 and T2 Tracks - -## Table Of Contents - -- [Introduction](#introduction) -- [For Participants](#for_participants) - - [Getting Started](#getting_started) - - [Starting Your Development](#starting_your_development) - - [Developing Your Dockerfile](#developing_your_dockerfile) - - [Developing Your Algorithm](#developing_your_algorithm) - - [Measuring your Algorithm](#measuring_your_algorithm) - - [How To Get Help](#how_to_get_help) - - [Leaderboard](#leaderboard) -- [For Evaluators](#for_organizers) - -## Introduction - -The T1 and T2 tracks evaluate algorithms on standardized Azure CPU servers. - -**Track 1**: In-memory indices with [FAISS](https://github.com/facebookresearch/faiss) as the baseline. Search would use Azure [Standard_F32s_v2 VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/fsv2-series) with 32 vCPUs and 64GB RAM. - -**Track 2:** Out-of-core indices with [DiskANN](https://github.com/Microsoft/diskann) as the baseline. In addition to the limited DRAM in T1, index can use an SSD for search. Search would use Azure [Standard_L8s_v2 VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/lsv2-series) with 8 vCPUS, 64GB RAM and a local SSD Index constrained to 1TB. - -Index construction for both tracks would use Azure [Standard_F64s_v2 VM](https://docs.microsoft.com/en-us/azure/virtual-machines/fsv2-series) with 64vCPUs, 128GB RAM and an additional [4TB of Premium SSD](https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types) to be used for storing the data, index and other intermediate data. There is a **time limit for 4 days per dataset** for index build. 
- -Queries will be supplied in one shot and the algorithm can execute the queries in any order. - -We will release plots for recall vs QPS separately for tracks T1 and T2. -Additionally, we will release leaderboards for T1 and T2. The metric for the leaderboard in each track will be the sum of improvements in recall over the baseline at the target QPS over all datasets. **The target QPS -for T1 is 10000 and for T2 is 1500.** - -Participants must submit their algorithm via a pull request and (optionally) index file(s) upload (one per participating dataset). - - -## For_Participants - -### Requirements - -You will need the following installed on your machine: -* Python (we tested with Anaconda using an environment created for Python version 3.8) and Docker. -* Note that we tested everything on Ubuntu Linux 18.04, but other environments should be possible. - -### Getting_Started - -This section presents a small tutorial on how to use this framework and several of the key scripts you will use throughout the development of your algorithm and your eventual submission. - -First, clone this repository and cd into the project directory: -``` -git clone -cd -``` -Install the Python package requirements: -``` -pip install -r requirements_py38.txt -``` -Create a small sample dataset. For example, to create a dataset with 10000 20-dimensional random floating-point vectors, run: -``` -python create_dataset.py --dataset random-xs -``` -To create a smaller slice of the competition datasets (e.g. a 10M slice of deep-1B), run: -``` -python create_dataset.py --dataset deep-10M -``` -To see a complete list of datasets, run the following: -``` -python create_dataset.py --help -``` - -For T2, set up the local SSD on the Azure Ls8v2 machine by running the following under sudo: -``` -parted /dev/nvme0n1 mklabel gpt mkpart primary 0% 100%; -mkfs -t ext4 /dev/nvme0n1; -mkdir /nvme; -mount /dev/nvme0n1 /nvme/; -echo "/dev/nvme0n1 /nvme ext4 defaults 0 0" >> /etc/fstab; -``` -You might also want to create a symbolic link to a folder under `/nvme`: -``` -sudo mkdir /nvme/data -sudo chmod 777 /nvme/data -ln -s /nvme/data data -``` - -Build the Docker container for the T1 or T2 baselines: -``` -#for T1 -python install.py --algorithm faissconda -#for T2 -python install.py --algorithm diskann -``` -Run a benchmark evaluation using the algorithm's definition file: -``` -python run.py --algorithm faiss_t1 --dataset random-xs -python run.py --algorithm diskann-t2 --dataset random-xs -``` - -For a competition dataset (e.g. deep-1B), running the following commands downloads a prebuilt index and runs the queries locally: -``` -python run.py --algorithm faiss_t1 --dataset deep-1B -python run.py --algorithm diskann-t2 --dataset deep-1B -``` - -Now plot QPS vs recall: -``` -python plot.py --algorithm faiss_t1 --dataset random-xs -python plot.py --algorithm diskann-t2 --dataset random-xs -``` -This will place a plot into the *results/* directory. - -### Starting_Your_Development - -First, please create a short name for your team without spaces or special characters. Henceforth in these instructions, this will be referenced as [your_team_name]. - -Create a custom branch off main in this repository: -``` -# For t1 -git checkout -b t1/[your_team_name] -# For t2 -git checkout -b t2/[your_team_name] -``` - - -### Developing_Your_Dockerfile - -This framework evaluates algorithms in Docker containers by default. Your algorithm's Dockerfile should live in *install/Docker.[your_team_name]*.
Your Dockerfile should contain everything needed to install and run your algorithm on a system with the same hardware. - -Please consult the Dockerfiles [here](../install/Dockerfile.faissconda) and [here](../install/Dockerfile.diskann) for examples. - -To build your Docker container, run: -``` -python install.py --install [your_team_name] -``` - -### Developing_Your_Algorithm - -Develop and add your algorithm's python class to the [benchmark/algorithms](../benchmark/algorithms) directory. -* You will need to subclass from the [BaseANN class](../benchmark/algorithms/base.py) and implement the functions of that parent class. -* You should consult the examples already in the directory. -* If it is difficult to write a Python wrapper, please consult [HttpANN](../benchmark/algorithms/httpann_example.py) for a RESTful API. - - -When you are ready to test on the competition datasets, use the create_dataset.py script as follows: -``` -python create_dataset.py --dataset [sift-1B|bigann-1B|text2image-1B|msturing-1B|msspacev-1B|ssnpp-1B] -``` -To benchmark your algorithm, first create an algorithm configuration yaml in your team's directory called *algos.yaml*. This file contains the index build parameters and query parameters that will get passed to your algorithm at run-time. Please look at [algos.yaml](../algos.yaml). - -If your machine is capable of both building and searching an index, you can benchmark your algorithm using the run.py script. -``` -python run.py --algorithm diskann-t2 --dataset deep-1B -``` -This will write the results to the toplevel [results](../results) directory. - -To build the index and upload it to Azure cloud storage without querying it: -``` -python run.py --algorithm diskann-t2 --dataset deep-1B --upload-index --blob-prefix --sas-string -``` -To download the index from cloud storage and query it on another machine: -``` -python run.py --algorithm diskann-t2 --dataset deep-1B --download-index --blob-prefix --sas-string -``` - -### Measuring_Your_Algorithm - - -Now you can analyze the results using plot.py. Sudo might be required here. To avoid sudo, run `sudo chmod -R 777 results/` before invoking these scripts. -``` -python plot.py --algorithm [your_team_name] --dataset deep-1B -``` -This will place a plot of the algorithm's performance into the toplevel [results](../results) directory. - -The plot.py script supports other metrics and options. To see a complete list, run: -``` -python plot.py --help -``` - -You can plot additional metrics, e.g.
mean SSD IOs vs recall/AP for T2, using: -``` -python plot.py --dataset deep-1B -x k-nn -y mean_ssd_ios -``` - - -Here are all the FAISS baseline recall@10 (AP for SSNPP) vs throughput plots for T1: -* [msturing-1B](results/T1/msturing-1B.png) -* [bigann-1B](results/T1/bigann-1B.png) -* [text2image-1B](results/T1/text2image-1B.png) -* [deep-1B](results/T1/deep-1B.png) -* [msspacev-1B](results/T1/msspacev-1B.png) -* [ssnpp-1B](results/T1/ssnpp-1B.png) - -Here are all the DiskANN baseline recall@10 (AP for SSNPP) vs throughput plots for T2: -* [msturing-1B](results/T2/msturing-1B.png) -* [bigann-1B](results/T2/bigann-1B.png) -* [text2image-1B](results/T2/text2image-1B.png) -* [deep-1B](results/T2/deep-1B.png) -* [msspacev-1B](results/T2/msspacev-1B.png) -* [ssnpp-1B](results/T2/ssnpp-1B.png) - - -Here are all the DiskANN baseline recall@10 (AP for SSNPP) vs mean SSD IOs plots for T2: -* [msturing-1B](results/T2/msturing-1B-IO.png) -* [bigann-1B](results/T2/bigann-1B-IO.png) -* [text2image-1B](results/T2/text2image-1B-IO.png) -* [deep-1B](results/T2/deep-1B-IO.png) -* [msspacev-1B](results/T2/msspacev-1B-IO.png) -* [ssnpp-1B](results/T2/ssnpp-1B-IO.png) - -To get a table overview of the best recall/AP achieved above a certain QPS threshold, run: -``` -python3 data_export.py --output res.csv -python3 eval/show_operating_points.py --algorithm $ALGO --threshold $THRESHOLD res.csv -``` - -For the track 1 baseline, this produced the following output: - -``` - recall/ap -algorithm dataset -faiss-t1 bigann-1B 0.634510 - deep-1B 0.650280 - msspacev-1B 0.728861 - msturing-1B 0.703611 - ssnpp-1B 0.753780 - text2image-1B 0.069275 -``` - -For the track 2 baseline, this produced: - -``` - recall/ap -algorithm dataset -diskann-t2 bigann-1B 0.94913 - deep-1B 0.93706 - msspacev-1B 0.90095 - msturing-1B 0.93564 - ssnpp-1B 0.16274 - text2image-1B 0.48854 -``` - -### Submitting_Your_Algorithm - -A submission is composed of a pull request to this repo with the following: -* Your algorithm's python class, inheriting from `BaseANN`, placed in the [benchmark/algorithms/](../benchmark/algorithms) directory. -* A Dockerfile in `install/` describing how to retrieve, compile and set up requirements for your algorithm. -* For each dataset you are participating in, add to [algos.yaml](../algos.yaml) - * 1 index build configuration - * 10 search configurations -* An entry in the [CI test list](../.github/workflows/benchmarks.yml) for the random-xs dataset, and for the random-range-xs dataset if your algorithm supports range search. We can start working with larger datasets once these tests pass. -* A URL to download any prebuilt indices, placed in `algos.yaml`. **This is optional, but strongly encouraged.** This would help us evaluate faster, although we would still build your index to verify the time limit. Please see `faiss_t1.py` and `diskann-t2.py` for examples. If you are unable to host the index on your own Azure blob storage, please let us know and we can arrange to have it copied to the organizers' account. - -We will run early PRs on the organizers' machines to the extent possible and provide any feedback necessary. - -### How_To_Get_Help - -There are several ways to get help as you develop your algorithm using this framework: -* You can submit an issue at this github repository.
-* Send an email to the competition's T1/T2 organizer, harsha.v.simhadri@gmail.com -* Send an email to the competition's googlegroup, big-ann-organizers@googlegroups.com - -### Leaderboard - -This leaderboard is based on the recall@10 vs throughput benchmark that has become standard when evaluating and comparing approximate nearest neighbor algorithms. We have run FAISS and DiskANN as baselines for T1 and T2 respectively, and for each dataset chosen the best recall amongst configurations providing at least **10K QPS for T1 and 1500 QPS for T2**. The recall of the baselines at this QPS threshold is listed [above](#measuring_your_algorithm). - -Algorithms will be ranked on how much their recall surpasses the baselines at these QPS thresholds. We will add up the recall improvements of each algorithm on all datasets it competes on. Participants are required to commit to at least 3 datasets, and ideally more. Algorithms that work on more datasets are at an advantage, as they can benefit from additional scores. Any recall regression compared to the baseline on the datasets committed to will be subtracted from the final score. - -#### Results for T1 -The table lists the best Recall/AP obtained at at least 10,000 QPS by algorithms in pull requests submitted before Nov 2021. All non-empty cells are derived from author-published indices that successfully ran on the standardized hardware with author-provided query configurations. Recall for bigann-1B does not count ties. This result is on the public query set. - - -| PR | Name |bigann-1B | deep-1B | msspacev-1B | msturing-1B | ssnpp-1B | text2image-1B | -|----|-----------|-----------|---------|-------------|-------------|-----------|-----------------| -| 58 | team11 | | 0.64955 | | 0.712211 | | | -| 60 | puck-t1 |0.71468 | 0.72255 | | | | | -| 66 | ngt-t1 | | | | | | | -| 69 | kst_ann_t1| 0.71219 | 0.71219 | 0.764542 | 0.756419 | | | -| 71 | buddy-t1 | 0.62765 | | | | | | -|-----|-----------|-----------|---------|-------------|-------------|-----------|----------------| -| | baseline | 0.63451 | 0.65028 | 0.728861 | 0.703611 | 0.75378 | 0.069275 | -|-----|-----------|-----------|---------|-------------|-------------|-----------|----------------| - - -#### Results for T2 -The table lists the best Recall/AP obtained at at least 1,500 QPS by algorithms in pull requests submitted before Nov 2021. All non-empty cells are derived from author-published indices that successfully ran on the standardized hardware with author-provided query configurations. Recall for bigann-1B does not count ties. This result is on the public query set.
- - -| PR | Name |bigann-1B | deep-1B | msspacev-1B | msturing-1B | ssnpp-1B | text2image-1B | -|----|-----------|-----------|---------|-------------|-------------|-----------|----------------| -| 62 | kota-t2 | 0.950859 | | 0.904001 | 0.939817 | 0.18212 | | -| 66 | ngt-t2 | | | | | | | -| 70 | bbann | | | 0.7602 | | 0.88573 | 0.495423 | -|----|-----------|-----------|---------|-------------|-------------|-----------|----------------| -| | baseline | 0.94913 | 0.93706 | 0.90095 | 0.93564 | 0.16274 | 0.48854 | -|----|-----------|-----------|---------|-------------|-------------|-----------|----------------| - -## For_Evaluators - diff --git a/t1_t2/results/T1/bigann-1B.png b/t1_t2/results/T1/bigann-1B.png deleted file mode 100644 index 82df3c71d..000000000 Binary files a/t1_t2/results/T1/bigann-1B.png and /dev/null differ diff --git a/t1_t2/results/T1/deep-1B.png b/t1_t2/results/T1/deep-1B.png deleted file mode 100644 index b90291c0f..000000000 Binary files a/t1_t2/results/T1/deep-1B.png and /dev/null differ diff --git a/t1_t2/results/T1/msspacev-1B.png b/t1_t2/results/T1/msspacev-1B.png deleted file mode 100644 index 3432fd1d3..000000000 Binary files a/t1_t2/results/T1/msspacev-1B.png and /dev/null differ diff --git a/t1_t2/results/T1/msturing-1B.png b/t1_t2/results/T1/msturing-1B.png deleted file mode 100644 index 357d90f19..000000000 Binary files a/t1_t2/results/T1/msturing-1B.png and /dev/null differ diff --git a/t1_t2/results/T1/ssnpp-1B.png b/t1_t2/results/T1/ssnpp-1B.png deleted file mode 100644 index f2b7b4cdc..000000000 Binary files a/t1_t2/results/T1/ssnpp-1B.png and /dev/null differ diff --git a/t1_t2/results/T1/text2image-1B.png b/t1_t2/results/T1/text2image-1B.png deleted file mode 100644 index 6cf0598a5..000000000 Binary files a/t1_t2/results/T1/text2image-1B.png and /dev/null differ diff --git a/t1_t2/results/T2/bigann-1B-IO.png b/t1_t2/results/T2/bigann-1B-IO.png deleted file mode 100644 index 9e0b6aa9e..000000000 Binary files a/t1_t2/results/T2/bigann-1B-IO.png and /dev/null differ diff --git a/t1_t2/results/T2/bigann-1B.png b/t1_t2/results/T2/bigann-1B.png deleted file mode 100644 index fe44fd88a..000000000 Binary files a/t1_t2/results/T2/bigann-1B.png and /dev/null differ diff --git a/t1_t2/results/T2/deep-1B-IO.png b/t1_t2/results/T2/deep-1B-IO.png deleted file mode 100644 index a636ca6af..000000000 Binary files a/t1_t2/results/T2/deep-1B-IO.png and /dev/null differ diff --git a/t1_t2/results/T2/deep-1B.png b/t1_t2/results/T2/deep-1B.png deleted file mode 100644 index 2fe899b4d..000000000 Binary files a/t1_t2/results/T2/deep-1B.png and /dev/null differ diff --git a/t1_t2/results/T2/msspacev-1B-IO.png b/t1_t2/results/T2/msspacev-1B-IO.png deleted file mode 100644 index 0130c437e..000000000 Binary files a/t1_t2/results/T2/msspacev-1B-IO.png and /dev/null differ diff --git a/t1_t2/results/T2/msspacev-1B.png b/t1_t2/results/T2/msspacev-1B.png deleted file mode 100644 index 5253f0cd8..000000000 Binary files a/t1_t2/results/T2/msspacev-1B.png and /dev/null differ diff --git a/t1_t2/results/T2/msturing-1B-IO.png b/t1_t2/results/T2/msturing-1B-IO.png deleted file mode 100755 index 08541aefa..000000000 Binary files a/t1_t2/results/T2/msturing-1B-IO.png and /dev/null differ diff --git a/t1_t2/results/T2/msturing-1B.png b/t1_t2/results/T2/msturing-1B.png deleted file mode 100644 index 351ae5604..000000000 Binary files a/t1_t2/results/T2/msturing-1B.png and /dev/null differ diff --git a/t1_t2/results/T2/res.csv b/t1_t2/results/T2/res.csv deleted file mode 100644 index 
e032ae062..000000000 --- a/t1_t2/results/T2/res.csv +++ /dev/null @@ -1,60 +0,0 @@ -algorithm,parameters,dataset,count,qps,distcomps,build,indexsize,queriessize,wspq,recall/ap -diskann-t2,DiskANN,bigann-1B,10,1692.271479416587,0.0,1000000.0,60384120.0,35682.28900295449,inf,0.9426500000000001 -diskann-t2,DiskANN,bigann-1B,10,1979.5720516956515,0.0,1000000.0,60384120.0,30503.623219107627,inf,0.9246700000000001 -diskann-t2,DiskANN,bigann-1B,10,1498.447251013022,0.0,1000000.0,60384120.0,40297.79490681267,inf,0.9549200000000001 -diskann-t2,DiskANN,bigann-1B,10,1395.0126358562839,0.0,1000000.0,60384120.0,43285.71544653797,inf,0.9599399999999999 -diskann-t2,DiskANN,bigann-1B,10,1449.7409112948683,0.0,1000000.0,60384120.0,41651.66308652115,inf,0.95691 -diskann-t2,DiskANN,bigann-1B,10,1173.2852848995394,0.0,1000000.0,60384120.0,51465.84618179226,inf,0.9699899999999999 -diskann-t2,DiskANN,bigann-1B,10,1310.0846908052526,0.0,1000000.0,60384120.0,46091.76828322792,inf,0.96411 -diskann-t2,DiskANN,bigann-1B,10,1594.14800808084,0.0,1000000.0,60384120.0,37878.615846150395,inf,0.94913 -diskann-t2,DiskANN,bigann-1B,10,981.8258342866466,0.0,1000000.0,60384120.0,61501.86508778572,inf,0.97882 -diskann-t2,DiskANN,bigann-1B,10,2303.0911898705594,0.0,1000000.0,60384120.0,26218.727363285067,inf,0.89605 -diskann-t2,DiskANN,deep-1B,10,1660.8887581780666,0.0,1000000.0,60630840.0,36505.05772976017,inf,0.9279400000000001 -diskann-t2,DiskANN,deep-1B,10,1990.253305673633,0.0,1000000.0,60630840.0,30463.88106838417,inf,0.90432 -diskann-t2,DiskANN,deep-1B,10,2307.308080428008,0.0,1000000.0,60630840.0,26277.73920366669,inf,0.8766400000000001 -diskann-t2,DiskANN,deep-1B,10,1021.5658500041819,0.0,1000000.0,60630840.0,59350.88765912819,inf,0.96555 -diskann-t2,DiskANN,deep-1B,10,1525.294026026525,0.0,1000000.0,60630840.0,39750.26386089421,inf,0.93706 -diskann-t2,DiskANN,deep-1B,10,1551.8613031064206,0.0,1000000.0,60630840.0,39069.75441596031,inf,0.93448 -diskann-t2,DiskANN,deep-1B,10,1725.2463398347454,0.0,1000000.0,60630840.0,35143.29438067818,inf,0.92354 -diskann-t2,DiskANN,deep-1B,10,1349.5413709825298,0.0,1000000.0,60630840.0,44926.99616600704,inf,0.9466699999999999 -diskann-t2,DiskANN,deep-1B,10,1220.8964718445557,0.0,1000000.0,60630840.0,49660.91834829998,inf,0.9545299999999999 -diskann-t2,DiskANN,deep-1B,10,1600.3843696590948,0.0,1000000.0,60630840.0,37885.17380541229,inf,0.9320600000000001 -diskann-t2,DiskANN,msspacev-1B,10,883.7119232408137,0.0,1000000.0,57424484.0,64980.999452184245,inf,0.914753035884841 -diskann-t2,DiskANN,msspacev-1B,10,824.5735527738586,0.0,1000000.0,57424484.0,69641.43320729183,inf,0.9160526674853322 -diskann-t2,DiskANN,msspacev-1B,10,950.6840259594734,0.0,1000000.0,57424484.0,60403.333212677695,inf,0.9133817710465275 -diskann-t2,DiskANN,msspacev-1B,10,1122.9665691083565,0.0,1000000.0,57424484.0,51136.41454669079,inf,0.9097182425978987 -diskann-t2,DiskANN,msspacev-1B,10,1505.677457711673,0.0,1000000.0,57424484.0,38138.635672525554,inf,0.9009551098376314 -diskann-t2,DiskANN,msspacev-1B,10,1702.1793320297897,0.0,1000000.0,57424484.0,33735.860211346415,inf,0.8958520944194296 -diskann-t2,DiskANN,msspacev-1B,10,1226.7363205846661,0.0,1000000.0,57424484.0,46810.779982964326,inf,0.9076204120616728 -diskann-t2,DiskANN,msspacev-1B,10,1981.1706313882712,0.0,1000000.0,57424484.0,28985.12782806637,inf,0.8887331150225133 -diskann-t2,DiskANN,msspacev-1B,10,1030.4179244402635,0.0,1000000.0,57424484.0,55729.31393948114,inf,0.91110997407559 
-diskann-t2,DiskANN,msspacev-1B,10,1346.1162781430423,0.0,1000000.0,57424484.0,42659.37863794104,inf,0.9046800382043936 -diskann-t2,DiskANN,text2image-1B,10,2659.429096957006,0.0,1000000.0,63315692.0,23808.001526510936,0.27174 -diskann-t2,DiskANN,text2image-1B,10,1084.2778991135426,0.0,1000000.0,63315692.0,58394.33972763264,0.59194 -diskann-t2,DiskANN,text2image-1B,10,1987.4666943394475,0.0,1000000.0,63315692.0,31857.485803576466,0.384928 -diskann-t2,DiskANN,text2image-1B,10,950.2152736328754,0.0,1000000.0,63315692.0,66632.99755005055,0.6246579999999999 -diskann-t2,DiskANN,text2image-1B,10,1529.0100796529537,0.0,1000000.0,63315692.0,41409.59751839638,0.488544 -diskann-t2,DiskANN,text2image-1B,10,1011.6194816882818,0.0,1000000.0,63315692.0,62588.446689789984,0.609235 -diskann-t2,DiskANN,text2image-1B,10,1382.9423353887794,0.0,1000000.0,63315692.0,45783.32037409238,0.521979 -diskann-t2,DiskANN,text2image-1B,10,1715.704209800216,0.0,1000000.0,63315692.0,36903.61755734851,0.445646 -diskann-t2,DiskANN,text2image-1B,10,1166.7836574132612,0.0,1000000.0,63315692.0,54265.15155377628,0.572158 -diskann-t2,DiskANN,text2image-1B,10,1265.0557338512756,0.0,1000000.0,63315692.0,50049.72532494257,0.549218 -diskann-t2,DiskANN,msturing-1B,10,1488.9374886469645,0.0,1000000.0,60662240.0,40741.96563827896,0.937403 -diskann-t2,DiskANN,msturing-1B,10,1283.6399190951693,0.0,1000000.0,60662240.0,47257.988083418655,0.946902 -diskann-t2,DiskANN,msturing-1B,10,1439.2729152786703,0.0,1000000.0,60662240.0,42147.83683902969,0.939912 -diskann-t2,DiskANN,msturing-1B,10,1160.2685821000873,0.0,1000000.0,60662240.0,52282.929087161254,0.952509 -diskann-t2,DiskANN,msturing-1B,10,1871.1431910244708,0.0,1000000.0,60662240.0,32419.881220734784,0.918614 -diskann-t2,DiskANN,msturing-1B,10,1454.2011212492587,0.0,1000000.0,60662240.0,41715.165195228954,0.939076 -diskann-t2,DiskANN,msturing-1B,10,1526.0685111801067,0.0,1000000.0,60662240.0,39750.66620900917,0.935642 -diskann-t2,DiskANN,msturing-1B,10,2198.1542303679685,0.0,1000000.0,60662240.0,27596.898871761703,0.900497 -diskann-t2,DiskANN,msturing-1B,10,1623.0586342268134,0.0,1000000.0,60662240.0,37375.26095531235,0.9310269999999999 -diskann-t2,DiskANN,msturing-1B,10,972.1252420784929,0.0,1000000.0,60662240.0,62401.671486586,0.960867 -diskann-t2,DiskANN,ssnpp-1B,60000,1333.8432820515925,0.0,1000000.0,62999136.0,47231.28784897477,0.178445601986414 -diskann-t2,DiskANN,ssnpp-1B,60000,1856.1525168484525,0.0,1000000.0,62999136.0,33940.710921194,0.1457741211862014 -diskann-t2,DiskANN,ssnpp-1B,60000,2093.526127890431,0.0,1000000.0,62999136.0,30092.357177066573,0.13150131209619825 -diskann-t2,DiskANN,ssnpp-1B,60000,1715.8090468634405,0.0,1000000.0,62999136.0,36716.868998426515,0.1627462515985982 -diskann-t2,DiskANN,ssnpp-1B,60000,1319.99508156511,0.0,1000000.0,62999136.0,47726.796016014174,0.15215346231460414 -diskann-t2,DiskANN,ssnpp-1B,60000,1316.7561211606983,0.0,1000000.0,62999136.0,47844.1945228759,0.178445601986414 -diskann-t2,DiskANN,ssnpp-1B,60000,2409.745042633539,0.0,1000000.0,62999136.0,26143.486088947448,0.12175038200269063 -diskann-t2,DiskANN,ssnpp-1B,60000,1866.6323831709003,0.0,1000000.0,62999136.0,33750.157003588254,0.1457741211862014 -diskann-t2,DiskANN,ssnpp-1B,60000,1299.947787665343,0.0,1000000.0,62999136.0,48462.82027460816,0.195030279941537 diff --git a/t1_t2/results/T2/ssnpp-1B-IO.png b/t1_t2/results/T2/ssnpp-1B-IO.png deleted file mode 100755 index d50aa2eac..000000000 Binary files a/t1_t2/results/T2/ssnpp-1B-IO.png and /dev/null differ diff --git 
a/t1_t2/results/T2/ssnpp-1B.png b/t1_t2/results/T2/ssnpp-1B.png deleted file mode 100755 index a450a2b11..000000000 Binary files a/t1_t2/results/T2/ssnpp-1B.png and /dev/null differ diff --git a/t1_t2/results/T2/text2image-1B-IO.png b/t1_t2/results/T2/text2image-1B-IO.png deleted file mode 100755 index 6a136e388..000000000 Binary files a/t1_t2/results/T2/text2image-1B-IO.png and /dev/null differ diff --git a/t1_t2/results/T2/text2image-1B.png b/t1_t2/results/T2/text2image-1B.png deleted file mode 100644 index e7caf1ada..000000000 Binary files a/t1_t2/results/T2/text2image-1B.png and /dev/null differ diff --git a/t3/LB_history/Dec.2.2021/LEADERBOARDS.md b/t3/LB_history/Dec.2.2021/LEADERBOARDS.md deleted file mode 100644 index 7a8a1031f..000000000 --- a/t3/LB_history/Dec.2.2021/LEADERBOARDS.md +++ /dev/null @@ -1,571 +0,0 @@ -# T3 Track Leaderboards (Unofficial) - -Please note that all rankings are currently unofficial due to the following reasons: -* All [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) must be resolved. - -## Final Rankings On Private Query Set - -*Not yet available* - -## Rankings On Public Query Set - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |[1](#recall-or-ap-rankings) |[5](#throughput-rankings) |*NQ* |*NQ* |final |*NA* |Harsha Simhadri |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/diskann-t2.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/EvalPublic.ipynb) | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[6](#recall-or-ap-rankings) |[6](#throughput-rankings) |[5](#power-rankings) |[5](#cost-rankings) |final |0/58 |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/faiss_t3.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/EvalPublic.ipynb) | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[4](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[4](#cost-rankings) |final |0/60 |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/gemini.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/EvalPublic.ipynb) | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[3](#recall-or-ap-rankings) |[3](#throughput-rankings) |[1](#power-rankings) |[2](#cost-rankings)\*\* |final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_ivfpq.py) 
|[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/EvalPublic.ipynb) | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[5](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |final |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_multigpu.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/EvalPublic.ipynb) | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) |[2](#power-rankings) |[1](#cost-rankings)|final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/graphann.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/EvalPublic.ipynb)| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
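
The anomaly check described in the notes above can be made concrete with a short sketch. This is only an illustration of the stated rule (a 30% or more improvement in batch query latency between the first and last query of a query group); the function name, the group size of 5, and the sample numbers are assumptions for illustration and are not part of the evaluation tooling.

```python
# Illustrative sketch of the anomaly rule above -- not the evaluators' tooling.
# Assumed: a "query group" is a list of per-batch latencies (seconds) for
# repeated runs of the same query configuration (5 runs in the example below).

def is_anomalous(latencies, threshold=0.30):
    """Flag a group whose last batch latency improved by >= threshold
    (0.30 = 30%) relative to the first batch latency."""
    if len(latencies) < 2 or latencies[0] <= 0:
        return False
    improvement = (latencies[0] - latencies[-1]) / latencies[0]
    return improvement >= threshold

group = [1.00, 0.95, 0.90, 0.65, 0.60]  # seconds per batch, first to last
print(is_anomalous(group))              # True: a 40% improvement would be flagged
```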
- -### Rankings Per Metric - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**0.410**|[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png) |[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png) |[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png) |[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png) |[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png) |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**0.409**|[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png) |[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png) |[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png) |[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png) |[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**0.368**|[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png) |[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png) |[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png) |[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png) |[0.94691](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**0.339**|[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png) |[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png) |[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png) |[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png) 
|[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png) |[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png) | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**0.268**|[0.99504](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png) |[0.99815](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png) |[0.98399](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png) |[0.98785](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png) |- |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png) |[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png) |[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png) |[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png) |[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png) |[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png) | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**3001623.821**|[816,807](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png) |[767,653](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png) |[586,722](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png) |[844,287](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png) |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**853256.879**|[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png) |[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png) |[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png) |[157,828](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png) |[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**401541.475**|[91,938](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png) |[85,446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png) |[110,830](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png) |[109,621](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png) |[19,340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**52429.395**|[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png) |[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png) |[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png) 
|[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png) |[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png) |[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png) | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**49398.127**|[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png) |[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png) |[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png) |[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png) |[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png) |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png) |[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png) |[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png) |[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png) |[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png) |[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png) | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**-0.691**|[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png) |[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png) |[0.0016](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png) |[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png) |[0.0094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)|-| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**-0.648**|[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png) |[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png) |[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png) |[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png) |[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**-0.594**|[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png) |[0.0003](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png) |[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png) |[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png) |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**-0.513**|[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png) |[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png) |[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png) |[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png) 
|[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)|[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png) |[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png) |[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png) |[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png) |[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)|[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)| -| 6|[-](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**$-3,978,198.83**|$16,086.82 |$15,439.92 |$16,347.45 |$16,382.81 |$103,599.49 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**$-2,339,919.09\*\***|$300,843.83 |$300,815.92 |$150,563.49 |$150,605.68 |$903,307.30 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**$-2,272,942.67\*\***|$150,082.04 |$150,088.58 |$150,127.39 |$150,078.78 |- |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**$-907,570.13**|$569,058.09 |$569,210.35 |$286,911.87 |$398,163.18 |$1,213,773.56 |$629,442.91 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|$545,633.16 |$737,886.17 |$853,857.46 |$735,942.66 |$1,272,735.86 |$428,074.79 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. 
-* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png)**| -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png)**| -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.99504](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png)**| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[816,807](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[91,938](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png)**| -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png)**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png)**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png)**| -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,086.82** |$14,664.20|$1,422.62|$14,664.20 |1 |14,226.208| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,082.04\*\*** |$150,000.00|$82.04|$150,000.00 |1 |820.405| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$300,843.83\*\*** |$300,000.00|$843.83|$150,000.00 |2 |8,438.315| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$545,633.16** |$506,503.70|$39,129.46|$22,021.90 |23 |391,294.584| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,058.09** |$557,266.60|$11,791.49|$55,726.66 |10 |117,914.908| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
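The capex/opex formula above is repeated verbatim for every dataset, so a small Python sketch may make it easier to follow. The function below is illustrative only (it is not part of the benchmark code); the constants come straight from the bullets above, and the example call plugs in the optanne_graphann Deep1B row as a rough check.

```python
import math

SECONDS_IN_4_YEARS = 4 * 365 * 24 * 60 * 60   # 4x365x24x60x60, as in the formula above
TARGET_QPS = 100_000                          # scale-out target used for the ranking
WS_PER_KWH = 3.6e6                            # 1 kWh = 3.6 million watt-seconds
USD_PER_KWH = 0.10

def t3_cost(unit_cost_usd, throughput_qps, watt_sec_per_query):
    """Illustrative restatement of the cost formula described in the bullets above."""
    units = math.ceil(TARGET_QPS / throughput_qps)        # units@100K qps
    capex = units * unit_cost_usd
    total_queries = SECONDS_IN_4_YEARS * TARGET_QPS       # queries served over 4 years
    kwh_4yrs = total_queries * watt_sec_per_query / WS_PER_KWH
    opex = kwh_4yrs * USD_PER_KWH
    return capex + opex, capex, opex, units, kwh_4yrs

# Rough check against the optanne_graphann Deep1B row above:
# unit cost $14,664.20, ~196,546 QPS, ~0.0041 W*s/query
print(t3_cost(14_664.20, 196_546, 0.0041))
# -> roughly ($16,101, $14,664.20, ~$1,437, 1, ~14,366 kWh); the small gap to the
#    $16,086.82 / $1,422.62 / 14,226.208 figures in the table comes from the
#    rounded 0.0041 W*s/query shown in the power chart.
```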
- -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png)** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.99815](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[767,653](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[85,446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png)** | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0003](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$15,439.92** |$14,664.20|$775.72|$14,664.20 |1 |7,757.221| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,088.58\*\*** |$150,000.00|$88.58|$150,000.00 |1 |885.770| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$300,815.92\*\*** |$300,000.00|$815.92|$150,000.00 |2 |8,159.226| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,210.35** |$557,266.60|$11,943.75|$55,726.66 |10 |119,437.537| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$737,886.17** |$682,678.90|$55,207.27|$22,021.90 |31 |552,072.703| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
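The recall, throughput, and power tables in each dataset section all use the same "operational point" rule: keep only the runs that meet the stated threshold and report the best value of the ranked metric among them. A minimal sketch of that selection, assuming each run is just a (recall@10, QPS) pair (the run format and numbers here are assumptions for illustration, not the evaluator's code):

```python
def best_throughput(runs, min_recall=0.90):
    """Highest QPS among runs that meet or exceed the recall@10 threshold."""
    qualifying = [qps for recall, qps in runs if recall >= min_recall]
    return max(qualifying) if qualifying else None

def best_recall(runs, min_qps=2000):
    """Highest recall@10 among runs that meet or exceed the QPS threshold."""
    qualifying = [recall for recall, qps in runs if qps >= min_qps]
    return max(qualifying) if qualifying else None

# runs = [(recall@10, qps), ...] for one submission on one dataset (made-up numbers)
runs = [(0.89, 5200), (0.93, 3300), (0.97, 2100), (0.99, 900)]
print(best_throughput(runs))   # 3300 -> throughput ranking entry (0.90 recall@10 floor)
print(best_recall(runs))       # 0.97 -> recall ranking entry (2000 QPS floor)
```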
- -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png)** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.98399](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[586,722](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[110,830](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0016](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$16,347.45** |$14,664.20|$1,683.25|$14,664.20 |1 |16,832.451 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**$150,127.39\*\*** |$150,000.00|$127.39|$150,000.00 |1 |1,273.870 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$150,563.49\*\*** |$150,000.00|$563.49|$150,000.00 |1 |5,634.885 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$286,911.87** |$278,633.30|$8,278.57|$55,726.66 |5 |82,785.683 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$853,857.46** |$792,788.40|$61,069.06|$22,021.90 |36 |610,690.611 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
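The W*S/Q column in the power tables is energy per query in watt-seconds (joules): the average wall power drawn during the run divided by its throughput. The sketch below shows that conversion, plus the follow-on conversion to kWh per query used in the cost tables; the 700 W / 150,000 QPS figures are made up for illustration.

```python
def watt_seconds_per_query(avg_power_watts, throughput_qps):
    """W*S/Q: average power divided by queries per second, i.e. joules per query."""
    return avg_power_watts / throughput_qps

def kwh_per_query(ws_per_query):
    """Convert watt-seconds per query to kWh per query (1 kWh = 3.6e6 W*s)."""
    return ws_per_query / 3.6e6

ws_q = watt_seconds_per_query(700, 150_000)   # ~0.0047 W*s/query
print(ws_q, kwh_per_query(ws_q))
```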
- -#### MSSpace - -##### MSSpace Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png)** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.98785](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS.
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[844,287](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[157,828](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[109,621](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png)** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,382.81** |$14,664.20|$1,718.61|$14,664.20 |1 |17,186.056 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,078.78\*\*** |$150,000.00|$78.78|$150,000.00 |1 |787.774 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$150,605.68\*\*** |$150,000.00|$605.68|$150,000.00 |1 |6,056.841 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$398,163.18** |$390,086.62|$8,076.56|$55,726.66 |7 |80,765.638 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$735,942.66** |$682,678.90|$53,263.76|$22,021.90 |31 |532,637.584 | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
- -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.94691](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[19,340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.860 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.860 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.86 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$103,599.49** |$87,985.20|$15,614.29|$14,664.20 |6 |156,142.873| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$903,307.30\*\*** |$900,000.00|$3,307.30|$150,000.00 |6 |33,072.963| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$1,213,773.56** |$1,170,259.86|$43,513.70|$55,726.66 |21 |435,137.010| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,272,735.86** |$1,233,226.40|$39,509.46|$22,021.90 |56 |395,094.625| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.86 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) 
- * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### FBSimSearchNet - -##### FBSimSearchNet AP Rankings - -|Rank|Submission |Team |Hardware |Status |AP | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest average precision for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### FBSimSearchNet Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 average precision. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - - -##### FBSimSearchNet Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)**| -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)**| -| 3|[-](-) |- |- |-|**-**| -| 4|[-](-) |- |- |-|**-**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### FBSimSearchNet Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$428,074.79** |$396,394.20|$31,680.59|$22,021.90 |18 |316,805.859| -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$629,442.91** |$612,993.26|$16,449.65|$55,726.66 |11 |164,496.451| -| 3|[-](-)|- |- |-|**-** | -| -| -| -| -| -| 4|[-](-)|- |- |-|**-** | -| -| -| -| -| - -* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 average precision. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending.
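For reference, the per-metric scores used throughout this leaderboard (the Score column in the Recall, Throughput, Power, and Cost rankings) are described above as the sum of benchmark improvements over the faiss_t3 baseline across the datasets a submission supports. A minimal sketch of that aggregation, assuming per-dataset results are kept in plain dicts (the data layout is an assumption, not the evaluator's actual code); for power and cost, where lower is better, the signed difference naturally yields a negative (better-than-baseline) score:

```python
def leaderboard_score(submission, baseline, min_datasets=3):
    """Sum of (submission - baseline) over the datasets the submission supports."""
    common = [d for d in submission if d in baseline]
    if len(common) < min_datasets:
        return None  # must support at least 3 databases to qualify
    return sum(submission[d] - baseline[d] for d in common)

# Example with the per-dataset W*S/Q numbers from the power tables above:
cuanns_multigpu = {"deep-1B": 0.0002, "bigann-1B": 0.0003,
                   "msturing-1B": 0.0004, "msspacev-1B": 0.0002}
faiss_t3 = {"deep-1B": 0.1117, "bigann-1B": 0.1576, "msturing-1B": 0.1743,
            "msspacev-1B": 0.1520, "text2image-1B": 0.1128, "ssnpp-1B": 0.0904}
print(leaderboard_score(cuanns_multigpu, faiss_t3))
# -> about -0.59, in line with the -0.594 Power Rankings score shown above
#    (the leaderboard computes it from unrounded measurements).
```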
diff --git a/t3/LB_history/Dec.2.2021/LEADERBOARDS_REJECT_ANOMALIES.md b/t3/LB_history/Dec.2.2021/LEADERBOARDS_REJECT_ANOMALIES.md deleted file mode 100644 index ab8ce9cef..000000000 --- a/t3/LB_history/Dec.2.2021/LEADERBOARDS_REJECT_ANOMALIES.md +++ /dev/null @@ -1,571 +0,0 @@ -# T3 Track Leaderboards After Rejecting Anomalies (Unofficial) - -Please note that all rankings are currently unofficial due to the following reasons: -* All [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) must be resolved. - -## Final Rankings On Private Query Set - -*Not yet available* - -## Rankings On Public Query Set - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |[1](#recall-or-ap-rankings) |[5](#throughput-rankings) |*NQ* |*NQ* |final |*NA* |Harsha Simhadri |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/diskann-t2.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/EvalPublic.ipynb) | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[6](#recall-or-ap-rankings) |[6](#throughput-rankings) |[5](#power-rankings) |[5](#cost-rankings) |final |0/58 |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/faiss_t3.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/EvalPublic.ipynb) | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[4](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[4](#cost-rankings) |final |0/60 |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/gemini.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/EvalPublic.ipynb) | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[3](#recall-or-ap-rankings) |[3](#throughput-rankings) |[1](#power-rankings) |[2](#cost-rankings)\*\* |final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_ivfpq.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/EvalPublic.ipynb) | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[5](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |final |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_multigpu.py) 
|[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/EvalPublic.ipynb) | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) |[2](#power-rankings) |[1](#cost-rankings)|final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/graphann.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/EvalPublic.ipynb)| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Metric - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**0.410**|[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png) |[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png) |[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png) |[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png) |[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png) |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**0.409**|[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png) |[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png) |[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png) |[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png) 
|[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**0.368**|[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png) |[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png) |[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png) |[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png) |[0.94691](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**0.339**|[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png) |[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png) |[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png) |[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png) |[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png) |[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png) | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**0.166**|[0.95736](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png) |[0.96750](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png) |[0.96286](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png) |[0.97541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png) |- |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png) |[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png) |[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png) |[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png) |[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png) |[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png) | - -* A submission must support at least 3 databases to qualify for this ranking. 
-* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**3001623.821**|[816,807](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png) |[767,653](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png) |[586,722](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png) |[844,287](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png) |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**851327.044**|[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png) |[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png) |[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png) |[155,899](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png) |[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**401541.475**|[91,938](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png) |[85,446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png) |[110,830](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png) |[109,621](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png) |[19,340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**52429.395**|[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png) |[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png) 
|[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png) |[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png) |[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png) |[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png) | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**49398.127**|[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png) |[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png) |[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png) |[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png) |[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png) |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png) |[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png) |[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png) |[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png) |[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png) |[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png) | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**-0.691**|[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png) |[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png) |[0.0016](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png) |[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png) |[0.0094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)|-| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**-0.648**|[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png) |[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png) |[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png) |[0.0050](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png) |[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**-0.594**|[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png) |[0.0003](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png) |[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png) |[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png) |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**-0.513**|[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png) |[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png) |[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png) |[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png) 
|[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)|[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png) |[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png) |[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png) |[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png) |[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)|[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)| -| 6|[-](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**$-3,978,172.56**|$16,086.82 |$15,439.92 |$16,347.45 |$16,409.08 |$103,599.49 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**$-2,339,919.09\*\***|$300,843.83 |$300,815.92 |$150,563.49 |$150,605.68 |$903,307.30 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**$-2,272,942.67\*\***|$150,082.04 |$150,088.58 |$150,127.39 |$150,078.78 |- |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**$-907,570.13**|$569,058.09 |$569,210.35 |$286,911.87 |$398,163.18 |$1,213,773.56 |$629,442.91 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|$545,633.16 |$737,886.17 |$853,857.46 |$735,942.66 |$1,272,735.86 |$428,074.79 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. 
-* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png)**| -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png)**| -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.95736](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png)**| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[816,807](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[91,938](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png)**| -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png)**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png)**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png)**| -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,086.82** |$14,664.20|$1,422.62|$14,664.20 |1 |14,226.208| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,082.04\*\*** |$150,000.00|$82.04|$150,000.00 |1 |820.405| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$300,843.83\*\*** |$300,000.00|$843.83|$150,000.00 |2 |8,438.315| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$545,633.16** |$506,503.70|$39,129.46|$22,021.90 |23 |391,294.584| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,058.09** |$557,266.60|$11,791.49|$55,726.66 |10 |117,914.908| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
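As a sanity check on the cost columns above, the snippet below is a minimal sketch of the capex/opex formula just described (the function name and structure are ours, not part of the evaluation framework). The example numbers are taken from the Deep1B cost table for optanne_graphann; the recomputed opex differs slightly from the published $1,422.62 because the W*S/Q values shown in the power tables are rounded.

```python
import math

SECONDS_4_YEARS = 4 * 365 * 24 * 60 * 60   # 4 years of wall-clock seconds
TARGET_QPS = 100_000                        # leaderboard scaling target
USD_PER_KWH = 0.10                          # assumed electricity price
WS_PER_KWH = 3_600_000                      # watt-seconds in one kWh

def four_year_cost(throughput_qps, watt_sec_per_query, unit_cost_usd):
    """Capex + opex for serving 100K qps for 4 years, per the formula above."""
    units = math.ceil(TARGET_QPS / throughput_qps)   # systems needed to reach 100K qps
    capex = units * unit_cost_usd
    total_queries = SECONDS_4_YEARS * TARGET_QPS     # queries served over 4 years
    kwh = total_queries * watt_sec_per_query / WS_PER_KWH
    opex = kwh * USD_PER_KWH
    return capex + opex, capex, opex, units

# Deep1B, optanne_graphann: 196,546 q/s, ~0.0041 W*s/q, $14,664.20 per system
total, capex, opex, units = four_year_cost(196_546, 0.0041, 14_664.20)
print(units, capex, round(opex, 2))   # 1  14664.2  ~1436 (table shows $1,422.62 from unrounded power)
```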
- -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png)** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.96750](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[767,653](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[85,446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png)** | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0003](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$15,439.92** |$14,664.20|$775.72|$14,664.20 |1 |7,757.221| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,088.58\*\*** |$150,000.00|$88.58|$150,000.00 |1 |885.770| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$300,815.92\*\*** |$300,000.00|$815.92|$150,000.00 |2 |8,159.226| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,210.35** |$557,266.60|$11,943.75|$55,726.66 |10 |119,437.537| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$737,886.17** |$682,678.90|$55,207.27|$22,021.90 |31 |552,072.703| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
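The per-metric scores shown earlier ("the sum of benchmark improvements of qualifying databases") can be reproduced with a few lines of Python. The helper below is only an illustration, not the official scoring code; the example values are copied from the recall tables, with faiss_t3 as the baseline.

```python
def leaderboard_score(submission, baseline):
    """Sum of per-dataset improvements over the baseline.

    For recall and throughput, higher is better, so larger scores win;
    for power and cost, lower is better, so the most negative score wins.
    A submission needs at least 3 qualifying datasets to be ranked.
    """
    return sum(submission[d] - baseline[d] for d in submission if d in baseline)

baseline = {"deep": 0.94275, "bigann": 0.93260, "msturing": 0.91322,
            "msspacev": 0.90853, "text2image": 0.86028}   # faiss_t3 recall@10
diskann = {"deep": 0.99821, "bigann": 0.99976, "msturing": 0.99444,
           "msspacev": 0.99342, "text2image": 0.98130}
print(round(leaderboard_score(diskann, baseline), 3))      # 0.41, i.e. the 0.410 shown above
```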
- -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png)** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.96286](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[586,722](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[110,830](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0016](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$16,347.45** |$14,664.20|$1,683.25|$14,664.20 |1 |16,832.451 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**$150,127.39\*\*** |$150,000.00|$127.39|$150,000.00 |1 |1,273.870 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$150,563.49\*\*** |$150,000.00|$563.49|$150,000.00 |1 |5,634.885 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$286,911.87** |$278,633.30|$8,278.57|$55,726.66 |5 |82,785.683 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$853,857.46** |$792,788.40|$61,069.06|$22,021.90 |36 |610,690.611 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
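Each per-database ranking fixes an operational point (for example 2000 QPS for the recall rankings, and 0.90 recall@10 for the throughput and power rankings) and then takes the best run that clears it. Below is a minimal sketch of that selection, assuming each run is a (qps, recall@10) pair produced by one search-parameter setting; the helpers are illustrative only.

```python
def best_recall_at(runs, min_qps=2000.0):
    """Highest recall among runs that meet or exceed the QPS operational point."""
    qualifying = [recall for qps, recall in runs if qps >= min_qps]
    return max(qualifying) if qualifying else None

def best_qps_at(runs, min_recall=0.90):
    """Highest throughput among runs that meet or exceed the recall operational point."""
    qualifying = [qps for qps, recall in runs if recall >= min_recall]
    return max(qualifying) if qualifying else None

# hypothetical (qps, recall@10) pairs for one algorithm on one dataset
runs = [(1500.0, 0.995), (2400.0, 0.991), (5200.0, 0.972), (9100.0, 0.941)]
print(best_recall_at(runs))   # 0.991 -> used for the recall ranking
print(best_qps_at(runs))      # 9100.0 -> used for the throughput ranking
```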
- -#### MSSpace - -##### MSSpace Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png)** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.97541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS.
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[844,287](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[155,899](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[109,621](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png)** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0050](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,409.08** |$14,664.20|$1,744.88|$14,664.20 |1 |17,448.764 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,078.78\*\*** |$150,000.00|$78.78|$150,000.00 |1 |787.774 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$150,605.68\*\*** |$150,000.00|$605.68|$150,000.00 |1 |6,056.841 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$398,163.18** |$390,086.62|$8,076.56|$55,726.66 |7 |80,765.638 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$735,942.66** |$682,678.90|$53,263.76|$22,021.90 |31 |532,637.584 | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
- -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.94691](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[19,340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.860 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.860 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.86 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$103,599.49** |$87,985.20|$15,614.29|$14,664.20 |6 |156,142.873| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$903,307.30\*\*** |$900,000.00|$3,307.30|$150,000.00 |6 |33,072.963| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$1,213,773.56** |$1,170,259.86|$43,513.70|$55,726.66 |21 |435,137.010| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,272,735.86** |$1,233,226.40|$39,509.46|$22,021.90 |56 |395,094.625| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.86 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) 
- * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### FBSimSearchNet - -##### FBSimSearchNet AP Rankings - -|Rank|Submission |Team |Hardware |Status |AP | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### FBSimSearchNet Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision. 
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-
-
-##### FBSimSearchNet Power Rankings
-
-|Rank|Submission |Team |Hardware |Status |W*S/Q |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)**|
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)**|
-| 3|[-](-) |- |- |-|**-**|
-| 4|[-](-) |- |- |-|**-**|
-| 5|[-](-) |- |- |-|**-**|
-| 6|[-](-) |- |- |-|**-**|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-
-##### FBSimSearchNet Cost Rankings
-
-|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs |
-|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------|
-| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$428,074.79** |$396,394.20|$31,680.59|$22,021.90 |18 |316,805.859|
-| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$629,442.91** |$612,993.26|$16,449.65|$55,726.66 |11 |164,496.451|
-| 3|[-](-)|- |- |-|**-** | -| -| -| -| -|
-| 4|[-](-)|- |- |-|**-** | -| -| -| -| -|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 average precision.
-* The formula is based on (a worked sketch in Python follows these notes):
- * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.)
- * Capex = cost per system * scale no.
- * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q.
- * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years.
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending.
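-
-The cost bullets above can be turned into a small worked example. The sketch below is illustrative only (the helper name `four_year_cost` is made up here and is not part of the benchmark code); it plugs in faiss_t3's FBSimSearchNet figures from the tables above:
-
-```python
-import math
-
-def four_year_cost(qps, watt_sec_per_query, unit_cost_usd,
-                   target_qps=100_000, usd_per_kwh=0.10, years=4):
-    """Capex/opex estimate following the cost bullets above."""
-    units = math.ceil(target_qps / qps)              # systems needed to reach 100K qps
-    capex = units * unit_cost_usd                    # cost per system * no. of systems
-    total_queries = years * 365 * 24 * 60 * 60 * target_qps
-    kwh_per_query = watt_sec_per_query / 3_600_000   # W*s -> KwH
-    kwh_4yrs = kwh_per_query * total_queries
-    opex = kwh_4yrs * usd_per_kwh
-    return units, round(capex, 2), round(opex, 2), round(capex + opex, 2)
-
-# faiss_t3 on FBSimSearchNet, using the figures in the tables above:
-# ~5,699 q/s, ~0.0904 W*s/q, $22,021.90 per system.
-print(four_year_cost(5699, 0.0904, 22021.90))
-# -> (18, 396394.2, ~31676, ~428070); the table's opex of $31,680.59 differs
-#    slightly because the W*s/q figure shown here is rounded to four decimals.
-```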
diff --git a/t3/LB_history/Nov.29.2021/LEADERBOARDS.md b/t3/LB_history/Nov.29.2021/LEADERBOARDS.md deleted file mode 100644 index ec4cc12f8..000000000 --- a/t3/LB_history/Nov.29.2021/LEADERBOARDS.md +++ /dev/null @@ -1,680 +0,0 @@ -# T3 Track Leaderboards (Unofficial) - -Please note that all rankings are currently unofficial due to the following reasons: -* We continue to take changes to algorithms and indexes until Dec 1, so scores and rankings are still subject to change. -* All [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) must be resolved. - -## Final Rankings On Private Query Set - -*Not yet available* - -## Rankings On Public Query Set - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |Status |Evaluator|Algo |Runs |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)| -|--------------------|-----------|----------|-------|---------|---------|--------|---------|--------------|-----------------------------|---------------------------| -|deepgram |DeepGram |NVidia GPU|final | -| -| -| *NQ*| *NQ*| *NQ*| *NQ*| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog |Harsha Simhadri |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/diskann-t2.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/EvalPublic.ipynb) |[1](#recall-or-ap-rankings) |[4](#throughput-rankings) |*NQ* |*NQ* | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/faiss_t3.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/EvalPublic.ipynb) |[6](#recall-or-ap-rankings) |[6](#throughput-rankings) |[5](#power-rankings) |[2](#cost-rankings) | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/gemini.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/EvalPublic.ipynb) |[3](#recall-or-ap-rankings) |[5](#throughput-rankings) |[4](#power-rankings) |[3](#cost-rankings) | -|kanndi |Silo.ai |LedaE APU |inprog | -| -| -| -| -| -| -| -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_ivfpq.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/EvalPublic.ipynb) |[2](#recall-or-ap-rankings) |[3](#throughput-rankings) |[1](#power-rankings) |*NQ* | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_multigpu.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/EvalPublic.ipynb) |[5](#recall-or-ap-rankings) |[1](#throughput-rankings) 
|[3](#power-rankings) |*NQ* | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |inprog|George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/graphann.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/EvalPublic.ipynb)|[4](#recall-or-ap-rankings) |[2](#throughput-rankings) |[2](#power-rankings) |[1](#cost-rankings) | -|optanne_graphann_2 |Intel |Intel Optane |inprog | -| -| -| -| -| -| -| -|vector_t3 |Vector Inst|NVidia GPU|final | -| -| -| *NQ*| *NQ*| *NQ*| *NQ*| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - * *NQ* = not qualified - -### Rankings Per Metric - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research(*org*)|Dell PowerEdge |inprog|**0.420**|[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png) |[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png) |[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png) |[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png) |[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png) |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |inprog|**0.373**|[0.99542](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png) |[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png) |[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png) |[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png) |[0.94136](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png) |- | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |inprog|**0.296**|[0.98871](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png) |[0.99253](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png) |[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png) 
|[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png) |[0.88585](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png) |[0.99053](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png) | -| 4|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |inprog|**0.279**|[0.98264](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png) |[0.99084](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png) |[0.96218](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png) |[0.98791](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png) |[0.90277](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png) |- | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |inprog|**0.278**|[0.99504](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png) |[0.99815](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png) |[0.98399](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png) |[0.98785](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png) |- |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png) |[0.92671](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png) |[0.90900](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png) |[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png) |[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png) |[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png) | -| 7| -| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| -| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |inprog|**2983752.533**|[805,064.205](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png) |[771,493.948](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png) |[578,677.940](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png) |[841,150.465](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png) |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |inprog|**821550.201**|[184,490.708](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png) |[343,727.791](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png) |[157,277.710](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png) |[139,612.021](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png) |[10,838.358](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |inprog|**397623.802**|[97,268.471](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png) |[84,367.141](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png) |[111,580.136](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png) |[109,555.059](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png) |[9,249.383](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png) |- | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research(*org*)|Dell PowerEdge |inprog|**50635.296**|[12,926.890](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png) |[19,094.371](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png) 
|[17,200.601](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png) |[6,503.212](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png) |[9,306.610](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png) |- | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |inprog|**37428.363**|[9,150.271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png) |[9,504.865](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png) |[20,166.678](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png) |[8,587.024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png) |[1,864.634](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png) |[8,123.552](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png) | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[4,417.036](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png) |[3,086.656](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png) |[2,359.485](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png) |[2,770.848](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png) |[1,762.363](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png) |[5,572.272](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png) | -| 7| -| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| -| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
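-
-As a rough illustration of how the score column appears to be computed, the snippet below assumes that a "benchmark improvement" is the submission's throughput minus the faiss_t3 baseline throughput on the same dataset (the dictionaries and names here are illustrative, not the evaluation code):
-
-```python
-# faiss_t3 baseline throughput (q/s) per dataset, from the table above.
-baseline = {"deep-1B": 4417.036, "bigann-1B": 3086.656,
-            "msturing-1B": 2359.485, "msspacev-1B": 2770.848,
-            "text2image-1B": 1762.363, "ssnpp-1B": 5572.272}
-
-# cuanns_multigpu throughput on its qualifying datasets, from the same table.
-cuanns_multigpu = {"deep-1B": 805064.205, "bigann-1B": 771493.948,
-                   "msturing-1B": 578677.940, "msspacev-1B": 841150.465}
-
-# Score = sum over qualifying datasets of (submission q/s - baseline q/s).
-score = sum(qps - baseline[ds] for ds, qps in cuanns_multigpu.items())
-print(round(score, 3))  # 2983752.533, matching the score shown above
-```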
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |inprog|**-0.744**|[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png) |[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png) |[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png) |[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png) |[0.0222](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)|-| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |inprog|**-0.687**|[0.0040](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png) |[0.0021](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png) |[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png) |[0.0054](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png) |[0.0702](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |inprog|**-0.650**|[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png) |[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png) |[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png) |[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png) |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|GSI Technology(*org*)|LedaE APU |inprog|**-0.489**|[0.0398](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png) |[0.0394](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png) |[0.0227](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png) |[0.0485](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png) 
|[0.1759](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)|[0.0540](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.1126](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png) |[0.1671](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png) |[0.2037](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png) |[0.1674](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png) |[0.1233](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)|[0.0949](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)| -| 6|[-](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|-|- |-|**-**|- |- |- |- |-|-| -| 7| -| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| -| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |inprog|**$-4,285,768.44**|$16,082.49 |$15,407.15 |$16,397.80 |$16,563.61 |$171,244.96 |- | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|$545,952.10 |$785,282.45 |$1,018,332.30 |$873,460.84 |$1,298,436.77 |$429,634.84 | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |inprog|**$1,089,170.96**|$626,932.94 |$626,785.91 |$286,578.81 |$685,704.76 |$3,070,882.16 |$743,385.69 | -| 4|[-](-)|-|- |-|**-**|- |- |- |- |- |- | -| 5|[-](-)|-|- |-|**-**|- |- |- |- |- |- | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | -| 7| -| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| -| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png)**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.99542](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png)**| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.99504](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.98871](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png)**| -| 5|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.98264](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png)**| -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[805,064.205](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[184,490.708](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[97,268.471](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png)**| -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[12,926.890](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[9,150.271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[4,417.036](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png)**| -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
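-
-The selection rule above can be sketched as follows (the numbers here are hypothetical, not taken from the leaderboard):
-
-```python
-# Hypothetical (recall@10, queries/sec) pairs from one submission's parameter sweep.
-runs = [(0.88, 21000.0), (0.91, 14500.0), (0.95, 9800.0), (0.97, 6200.0)]
-
-# Throughput ranking at the 0.90 recall@10 operational point:
-# keep only runs meeting the recall floor, then report the best throughput.
-qualifying = [qps for recall, qps in runs if recall >= 0.90]
-best_qps = max(qualifying) if qualifying else None
-print(best_qps)  # 14500.0
-```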
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png)**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png)**| -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.0040](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.0398](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1126](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |inprog|**$16,082.49** |$14,664.20|$1,418.29|$14,664.20 |1 |14,182.933| -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$545,952.10** |$506,503.70|$39,448.40|$22,021.90 |23 |394,484.028| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |inprog|**$626,932.94** |$612,993.26|$13,939.68|$55,726.66 |11 |139,396.812| -| 4|[-](-)|- |- |-|**-** |-|-|- |- |-| -| 5|[-](-)|- |- |-|**-** |-|-|- |- |-| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| -| 6| -| -| -| -| -| -| -| -| -| -| -| 7| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. 
We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png)** | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.99815](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.99253](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png)** | -| 5|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.99084](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.92671](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png)** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[771,493.948](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[343,727.791](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[84,367.141](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png)** | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[19,094.371](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[9,504.865](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,086.656](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png)** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.0021](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.0394](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1671](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |inprog|**$15,407.15** |$14,664.20|$742.95|$14,664.20 |1 |7,429.540| -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |inprog|**$626,785.91** |$612,993.26|$13,792.65|$55,726.66 |11 |137,926.464| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$785,282.45** |$726,722.70|$58,559.75|$22,021.90 |33 |585,597.505| -| 4|[-](-)|- |- |-|**-** |-|-|- |- |-| -| 5|[-](-)|- |- |-|**-** |-|-|- |- |-| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| -| 7| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. 
We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png)** | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.98399](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png)** | -| 5|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.96218](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.90900](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png)** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[578,677.940](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[157,277.710](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[111,580.136](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[20,166.678](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[17,200.601](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[2,359.485](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png)** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.0227](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.2037](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**$16,397.80** |$14,664.20|$1,733.60|$14,664.20 |1 |17,335.975 | -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**$286,578.81** |$278,633.30|$7,945.51|$55,726.66 |5 |79,455.069 | -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,018,332.30** |$946,941.70|$71,390.60|$22,021.90 |43 |713,905.964 | -| 4|[-](-) |- |- |-|**-** |-|-|- |- |- | -| 5|[-](-) |- |- |-|**-** |-|-|- |- |- | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | -| 7| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-
-#### MSSpace
-
-##### MSSpace Recall Rankings
-
-|Rank|Submission |Team |Hardware |Status |R@10 |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------|
-| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png)** |
-| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png)** |
-| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.98791](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png)** |
-| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.98785](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png)** |
-| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png)** |
-| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png)** |
-| 7| -| -| -| -| -|
-| 8| -| -| -| -| -|
-| 9| -| -| -| -| -|
-| 10| -| -| -| -| -|
-
-* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS.
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[841,150.465](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[139,612.021](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[109,555.059](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[8,587.024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[6,503.212](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[2,770.848](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png)** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |inprog|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.0054](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.0485](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1674](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |inprog|**$16,563.61** |$14,664.20|$1,899.41|$14,664.20 |1 |18,994.109 | -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |inprog|**$685,704.76** |$668,719.92|$16,984.84|$55,726.66 |12 |169,848.419 | -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$873,460.84** |$814,810.30|$58,650.54|$22,021.90 |37 |586,505.424 | -| 4|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 5|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 7| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| - -* The operational point for ranking is 0.90 recall@10. 
We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.94136](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.90277](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.88585](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[10,838.358](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research(*org*) |Dell PowerEdge |inprog|**[9,306.610](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[9,249.383](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[1,864.634](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[1,762.363](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.860 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.860 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |inprog|**[0.0222](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**[0.0702](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1233](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.1759](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| -| 6| -| -| -| -| -| -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.86 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |inprog|**$171,244.96** |$146,642.00|$24,602.96|$14,664.20 |10 |246,029.624| -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,298,436.77** |$1,255,248.30|$43,188.47|$22,021.90 |57 |431,884.673| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**$3,070,882.16** |$3,009,239.64|$61,642.52|$55,726.66 |54 |616,425.231| -| 4|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 7| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.86 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. 
of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### FBSimSearchNet - -##### FBSimSearchNet AP Rankings - -|Rank|Submission |Team |Hardware |Status |AP | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.99053](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### FBSimSearchNet Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[8,123.552](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[5,572.272](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - - -##### FBSimSearchNet Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |inprog|**[0.0540](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)**| -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.0949](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)**| -| 3|[-](-) |- |- |-|**-**| -| 4|[-](-) |- |- |-|**-**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| -| 7| -| -| -| -| -| -| 8| -| -| -| -| -| -| 9| -| -| -| -| -| -| 10| -| -| -| -| -| - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### FBSimSearchNet Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$429,634.84** |$396,394.20|$33,240.64|$22,021.90 |18 |332,406.441| -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |inprog|**$743,385.69** | -| -| -| -| -| -| 3|[-](-)|- |- |-|**-** | -| -| -| -| -| -| 4|[-](-)|- |- |-|**-** | -| -| -| -| -| -| 5| -| -| -| -| -| -| -| -| -| -| -| 6| -| -| -| -| -| -| -| -| -| -| -| 7| -| -| -| -| -| -| -| -| -| -| -| 8| -| -| -| -| -| -| -| -| -| -| -| 9| -| -| -| -| -| -| -| -| -| -| -| 10| -| -| -| -| -| -| -| -| -| -| - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - diff --git a/t3/LB_history/Nov.29.2021/TASKS_ISSUES_RESOLUTIONS.md b/t3/LB_history/Nov.29.2021/TASKS_ISSUES_RESOLUTIONS.md deleted file mode 100644 index e9a750712..000000000 --- a/t3/LB_history/Nov.29.2021/TASKS_ISSUES_RESOLUTIONS.md +++ /dev/null @@ -1,42 +0,0 @@ - -# BigANN Challenge T3 Tasks, Issues, and Resolutions - -In the spirit of maintaining a fair and open competition, we will be tracking all important remaining tasks and issues, and their respective resolutions, and making all of that public here in this README. All competition rankings and winners will be "unofficial" until all tasks and issues have been resolved. - -Participants should send their questions and issues to the T3 organizer directly (gwilliams@gsitechnology.com), or to the competition Google group at big-ann-organizers@googlegroups.com. Note that some issues may require a complete re-evaluation of an algorithm on its respective hardware, or may require additional information from a participant or competition organizer(s). - -## Tasks (open) - -* [T3 Organizer to Microsoft] The currently reported DiskANN CSV results use an old version of the recall computation (i.e., not accounting for ties), which will most likely affect msspacev-1B recall. - * PENDING RESOLUTION: [Microsoft to T3 Organizer] Will re-base and send the CSV. -* [Microsoft to T3 Organizer] Currently, DiskANN cannot qualify for the power and cost benchmarks due to an issue with running IPMICAP (python ipmi in particular seems to be the issue). - * PENDING RESOLUTION: [T3 Organizer to Microsoft] We will work on local dcmi support in the IPMICAP server. -* [GSI to T3 Organizer] The new index for SSNPP and Text2Image requires re-evaluation on those datasets and updated scores. - * PENDING RESOLUTION: [T3 Organizer to GSI] We ran SSNPP to completion, but are having issues with Text2Image. -* [T3 Organizer to Microsoft] Need to retrieve "results" h5py files from the MS DiskANN remote machine. - -## Issues (open) - -* [Intel asks T3 Organizer] Why won't there be one winner for T3 that combines all individual benchmarks? - * PENDING RESOLUTION: [T3 Organizer to Intel] We have provided the reason. Hopefully it's a good enough explanation and we can soon remove this issue. -* [Intel asks T3 Organizer] Why are power and cost rankings optional for a submission? - * PENDING RESOLUTION: [T3 Organizer to Intel] We have provided the reason. Hopefully it's a good enough explanation and we can soon remove this issue. -* [GSI to T3 Organizers] We cannot reproduce the baseline performance on SSNPP on the same or similar hardware. - * PENDING RESOLUTION: [T3 Organizer to GSI] We have reproduced and sent the results. Please approve. -* [T3 Organizer asks NVidia] Can't we use an MSRP from another company as a proxy for system cost? - * PENDING RESOLUTION: [T3 Organizer to NVidia] We will take the cheapest MSRP from a third-party seller. Please approve. -* [GSI to T3 Organizers] Have you discussed taking power also at the recall working point and not just at the throughput working point? 
-* [GSI asks T3 Organizers] Some algorithms implement smart caching mechanisms to simulate real-life scenarios, yet the competition framework sends the same queries again and again, 50 times per dataset (5 runs x 10 query configurations), which is not a real-life case. It is important that the competition framework verify, automatically (or manually if that is not possible), that no caching mechanism is used between runs and between query configurations. One way is to make sure that the throughput across runs does not differ much, taking into account that there are 5 runs and 10 configurations with the same queries. Probably a better way is to send different queries, or to cool down the cache between runs by sending random queries. - * PENDING RESOLUTION: [T3 Organizers to GSI] We will add a "cache detection" countermeasure to the framework and re-evaluate all submissions. - -## Resolutions - -* [GSI asked] What does NQ mean? - * [T3 Organizer responded] It could mean 1) the team did not submit a qualifying algorithm for the benchmark, 2) the team decided not to participate in that benchmark, or 3) we were unable to get some key data for the benchmark (such as power or system cost, or both). -* [T3 Organizer self-report] Need to retrieve "results" h5py files from NVidia's remote machine. - * Done on 11/23/2021 -* [T3 Organizer to NVidia] Need to retrieve power monitoring "results" h5py files from NVidia's remote machine. - * Done on 11/23/2021 and subsequently after changes to algos.yaml -* [GSI to T3 Organizer] Need better documentation for how to extract the power benchmark from the plot.py script. - * Answered via email. Basically, you need to supply "wspq" as an explicit metric to retrieve on the chosen axis. Run "python plot.py --help" for more information. - diff --git a/t3/LEADERBOARDS.md b/t3/LEADERBOARDS.md deleted file mode 100644 index 801c76605..000000000 --- a/t3/LEADERBOARDS.md +++ /dev/null @@ -1,41 +0,0 @@ - -# BigANN Challenge T3 Leaderboards and Winners - -We rank participants based on 4 different but inter-related benchmarks: -* One based on recall/average precision -* One based on throughput -* One based on power consumption -* One based on hardware cost - -We maintain two sets of leaderboards that rank participants on all benchmarks: -* [Leaderboards based on a public query dataset](LEADERBOARDS_PUBLIC.md) to which participants had access during the competition. -* [Leaderboards based on a private query dataset](LEADERBOARDS_PRIVATE.md) on which submissions are currently being evaluated. - -Please consult the main [T3 track README](README.md) for more details about benchmarks and ranking methodology. - -## Public Dataset Leaderboards And Winners - -The leaderboards and rankings on the public dataset live [here](LEADERBOARDS_PUBLIC.md). - -We would like to congratulate all the winners of this part of the competition, teams from Intel and NVidia: -* Sourabh Dongaonkar (Intel Corporate) -* Mariano Tepper (Intel Labs) -* Yong Wong (NVidia) -* Akira Naruse (NVidia) -* Jingrong Zhang (NVidia) -* Mahesh Doijade (NVidia) - -We are in the process of resolving the remaining issues and tasks. - -Upon completion, we will make the rankings and winners official. - -Please revisit this page again soon for more updates. - -## Private Dataset Leaderboards Status - -The status of the leaderboards and rankings on the private dataset lives [here](LEADERBOARDS_PRIVATE.md). 
- -All submissions are currently being evaluated using the private data sets so the scores (and rankings) could change as evaluation proceeds - -Please revisit this page again soon for more updates. - diff --git a/t3/LEADERBOARDS_PRIVATE.md b/t3/LEADERBOARDS_PRIVATE.md deleted file mode 100644 index 5b2a588cc..000000000 --- a/t3/LEADERBOARDS_PRIVATE.md +++ /dev/null @@ -1,566 +0,0 @@ -# T3 Track Private Dataset Leaderboards - -Please note that all rankings and winners are unofficial until all [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) are resolved. - -## Rankings By Category - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |*NA* |*NA* |*NA* |*NA* |eval |*NA* |[Harsha Simhadri](https://github.com/harsha-simhadri) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/diskann-t2.py) |NA | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[5](#recall-or-ap-rankings) |[5](#throughput-rankings) |[5](#power-rankings) |[4](#cost-rankings) |eval |0/49 |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/faiss_t3.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/EvalPrivate.ipynb) | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[3](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[5](#cost-rankings) |eval |0/50 |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/gemini.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/EvalPrivate.ipynb) | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[1](#recall-or-ap-rankings) |[3](#throughput-rankings) |[1](#power-rankings) |[2](#cost-rankings)\*\* |eval |[6/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_ivfpq.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/EvalPrivate.ipynb) | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[4](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |eval |[3/30](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_multigpu.py) |NA | 
-|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) |[2](#power-rankings) |[1](#cost-rankings)|eval |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/graphann.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/EvalPrivate.ipynb)| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. In general, our analysis did not uncover this symptom of systematic query response caching from any submission. Also, if we throw out the anomalous data points, the [adjusted leaderboard rankings](LEADERBOARDS_PRIVATE_REJECT_ANOMALIES.md) do not change even though some scores change slightly. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Benchmark - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**0.225**|[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_deep-1B_recall.png) |[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_bigann-1B_recall.png) |[0.99054](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_msturing-1B_recall.png) |- |[0.63732](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_text2image-1B_recall.png) |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**0.224**|[0.99872](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_deep-1B_recall.png) |[0.99977](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_bigann-1B_recall.png) |[0.99561](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_msturing-1B_recall.png) |- 
|[0.62692](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_text2image-1B_recall.png) |- | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**0.197**|[0.99185](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_deep-1B_recall.png) |[0.99452](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_bigann-1B_recall.png) |[0.97719](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_msturing-1B_recall.png) |- |[0.61895](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_text2image-1B_recall.png) |[0.99672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_ssnpp-1B_recall.png) | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**0.182**|[0.99551](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_deep-1B_recall.png) |[0.99872](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_bigann-1B_recall.png) |[0.98330](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_msturing-1B_recall.png) |- |- |- | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|[0.94437](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_deep-1B_recall.png) |[0.93604](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_bigann-1B_recall.png) |[0.91513](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_msturing-1B_recall.png) |- |[0.60105](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_text2image-1B_recall.png) |[0.98567](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_ssnpp-1B_recall.png) | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
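The "Score" column above is consistent with summing each qualifying dataset's improvement over the faiss_t3 baseline row. The Python sketch below is illustrative only and is not the official scoring script; the recall values are copied from the cuanns_ivfpq and faiss_t3 rows of the table above.

```python
# Minimal sketch (not the official scoring script): the "Score" column appears to be
# the sum of per-dataset improvements over the faiss_t3 baseline. Values below are
# copied from the Recall Or AP Rankings table for cuanns_ivfpq and faiss_t3.
baseline = {"deep-1B": 0.94437, "bigann-1B": 0.93604,
            "msturing-1B": 0.91513, "text2image-1B": 0.60105}
submission = {"deep-1B": 0.99541, "bigann-1B": 0.99882,
              "msturing-1B": 0.99054, "text2image-1B": 0.63732}

def score(sub, base):
    """Sum of per-dataset improvements over the baseline (recall-style: higher is better)."""
    return sum(sub[d] - base[d] for d in sub if d in base)

print(f"{score(submission, baseline):.4f}")  # ~0.2255; reported as 0.225 in the table above
```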
- -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**2116124**|[968,840](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_deep-1B_throughput.png) |[764,700](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_bigann-1B_throughput.png) |[393,528](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_msturing-1B_throughput.png) |- |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**685763**|[210,403](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_deep-1B_throughput.png) |[350,007](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_bigann-1B_throughput.png) |[127,883](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_msturing-1B_throughput.png) |- |[10,364](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_text2image-1B_throughput.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**297608**|[94,042](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_deep-1B_throughput.png) |[84,806](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_bigann-1B_throughput.png) |[110,886](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_msturing-1B_throughput.png) |- |[20,765](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_text2image-1B_throughput.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**30425**|[12,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_deep-1B_throughput.png) |[10,868](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_bigann-1B_throughput.png) |[14,862](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_msturing-1B_throughput.png) |- |[2,154](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_text2image-1B_throughput.png) |[9,343](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_ssnpp-1B_throughput.png) | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|[5,035](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_deep-1B_throughput.png) 
|[3,279](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_bigann-1B_throughput.png) |[2,630](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_msturing-1B_throughput.png) |- |[1,948](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_text2image-1B_throughput.png) |[6,256](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_ssnpp-1B_throughput.png) | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**-0.475**|[0.0104](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_deep-1B_power.png) |[0.0119](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_bigann-1B_power.png) |[0.0099](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_msturing-1B_power.png) |- |[0.0441](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_text2image-1B_power.png)|-| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**-0.469**|[0.0036](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_deep-1B_power.png) |[0.0020](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_bigann-1B_power.png) |[0.0054](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_msturing-1B_power.png) |- |[0.0710](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_text2image-1B_power.png)|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**-0.431**|[0.0025](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_deep-1B_power.png) |[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_bigann-1B_power.png) |[0.0061](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_msturing-1B_power.png) |- |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU 
|eval|**-0.232**|[0.0352](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_deep-1B_power.png) |[0.0342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_bigann-1B_power.png) |[0.0250](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_msturing-1B_power.png) |- |[0.2661](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_text2image-1B_power.png)|[0.0433](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_ssnpp-1B_power.png)| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|[0.0923](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_deep-1B_power.png) |[0.1623](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_bigann-1B_power.png) |[0.1874](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_msturing-1B_power.png) |- |[0.1091](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_text2image-1B_power.png)|[0.0847](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_ssnpp-1B_power.png)| -| 6|[-](-)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
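For reference, the unit used in the power tables is watt-seconds per query, i.e. energy (joules) spent per query. One plausible reading, assuming the metric is average wall power divided by sustained throughput, is sketched below; the numbers are hypothetical placeholders, not measurements from any submission.

```python
# Sketch only: one consistent reading of the watt-seconds-per-query unit is
# average wall power (watts) divided by throughput (queries per second).
def watt_seconds_per_query(avg_power_watts: float, qps: float) -> float:
    """Joules (watt-seconds) spent per query at a given sustained throughput."""
    return avg_power_watts / qps

# Example: a hypothetical 350 W system sustaining 5,000 QPS spends 0.07 W*s per query.
print(watt_seconds_per_query(350.0, 5000.0))  # 0.07
```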
- -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**$-3,100,829.55**|$15,918.69 |$15,372.40 |$16,572.66 |- |$171,523.05 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**$-1,793,445.89\*\***|$303,647.91 |$304,177.20 |$153,483.45 |- |$765,461.90 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**$-1,683,055.85\*\***|$150,863.29 |$150,786.43 |$152,142.96 |- |- |- | -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|$472,764.09 |$739,552.84 |$924,531.60 |- |$1,183,367.83 |$382,013.99 | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**$1,120,285.77**|$513,869.50 |$569,253.60 |$398,832.13 |- |$2,712,384.33 |$628,176.55 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
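The per-dataset cost tables that follow derive their totals from the capex/opex formula spelled out in their footnotes (systems scaled to 100K QPS, plus 4 years of energy at $0.10/KwH). The sketch below is a rough, unofficial rendering of that formula using figures from the Deep1B tables for optanne_graphann (unit cost $14,664.20, 210,403 QPS, roughly 0.0036 watt-seconds per query); the small difference from the published opex comes from the power figure being rounded here.

```python
import math

# Minimal sketch of the cost formula described in the cost-ranking footnotes below:
# capex to reach the 100K QPS target, plus 4 years of energy billed at $0.10/KwH.
def total_cost(unit_cost_usd, qps_per_unit, watt_sec_per_query,
               target_qps=100_000, years=4, usd_per_kwh=0.10):
    units = math.ceil(target_qps / qps_per_unit)          # systems needed to reach target QPS
    capex = units * unit_cost_usd
    total_queries = years * 365 * 24 * 3600 * target_qps  # queries served over the period
    kwh = watt_sec_per_query * total_queries / 3_600_000  # watt-seconds -> KwH
    opex = kwh * usd_per_kwh
    return capex + opex, capex, opex, units, kwh

# Inputs taken from the Deep1B tables for optanne_graphann.
print(total_cost(14_664.20, 210_403, 0.0036))
# ~ (15925.64, 14664.20, 1261.44, 1, 12614.4); the table reports $15,918.69 total.
```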
- -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.99872](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_deep-1B_recall.png)**| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[0.99551](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_deep-1B_recall.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_deep-1B_recall.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.99185](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_deep-1B_recall.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.94437](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_deep-1B_recall.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
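As the footnote above describes, the recall rankings are taken at an operational point of 2000 QPS: among all search-parameter runs, only those at or above the QPS floor are eligible, and the highest recall among them is reported. The sketch below uses made-up runs purely to illustrate the selection rule.

```python
# Sketch with hypothetical data (not from any submission): choosing the operational
# point for a recall ranking. Keep only runs at or above the 2000 QPS floor and
# report the best recall among them.
runs = [
    {"params": "nprobe=32",  "qps": 5200.0, "recall_at_10": 0.912},
    {"params": "nprobe=64",  "qps": 3100.0, "recall_at_10": 0.941},
    {"params": "nprobe=128", "qps": 1600.0, "recall_at_10": 0.963},  # too slow: excluded
]

def operational_recall(runs, min_qps=2000.0):
    eligible = [r for r in runs if r["qps"] >= min_qps]
    return max(r["recall_at_10"] for r in eligible) if eligible else None

print(operational_recall(runs))  # 0.941
```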
- -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[968,840](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_deep-1B_throughput.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[210,403](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_deep-1B_throughput.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[94,042](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_deep-1B_throughput.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[12,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_deep-1B_throughput.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[5,035](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_deep-1B_throughput.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[0.0025](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_deep-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.0036](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_deep-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.0104](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_deep-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.0352](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_deep-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.0923](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_deep-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |eval|**$15,918.69** |$14,664.20|$1,254.49|$14,664.20 |1 |12,544.902| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |eval|**$150,863.29\*\*** |$150,000.00|$863.29|$150,000.00 |1 |8,632.870| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |eval|**$303,647.91\*\*** |$300,000.00|$3,647.91|$150,000.00 |2 |36,479.145| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |eval|**$472,764.09** |$440,438.00|$32,326.09|$22,021.90 |20 |323,260.873| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |eval|**$513,869.50** |$501,539.94|$12,329.56|$55,726.66 |9 |123,295.635| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. 
-* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.99977](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_bigann-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_bigann-1B_recall.png)** | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[0.99872](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_bigann-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.99452](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_bigann-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.93604](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_bigann-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[764,700](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_bigann-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[350,007](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_bigann-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[84,806](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_bigann-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[10,868](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_bigann-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[3,279](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_bigann-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.0020](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_bigann-1B_power.png)**| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_bigann-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.0119](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_bigann-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.0342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_bigann-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.1623](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_bigann-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |eval|**$15,372.40** |$14,664.20|$708.20|$14,664.20 |1 |7,082.016| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |eval|**$150,786.43\*\*** |$150,000.00|$786.43|$150,000.00 |1 |7,864.254| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |eval|**$304,177.20\*\*** |$300,000.00|$4,177.20|$150,000.00 |2 |41,771.962| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |eval|**$569,253.60** |$557,266.60|$11,987.00|$55,726.66 |10 |119,870.019| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |eval|**$739,552.84** |$682,678.90|$56,873.94|$22,021.90 |31 |568,739.415| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. 
-* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.99561](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_msturing-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.99054](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_msturing-1B_recall.png)** | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[0.98330](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_msturing-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.97719](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_msturing-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.91513](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_msturing-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
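-
-For reference, R@10 in these tables is the average fraction of each query's true 10 nearest neighbors that appear in the returned top 10. A simplified sketch of that metric (the competition evaluator may additionally credit distance ties, which this ignores):
-
-```python
-import numpy as np
-
-def recall_at_k(retrieved_ids, groundtruth_ids, k=10):
-    """Mean overlap between the returned top-k ids and the ground-truth top-k ids.
-
-    retrieved_ids, groundtruth_ids: integer arrays of shape (num_queries, >= k).
-    """
-    hits = 0
-    for found, truth in zip(retrieved_ids[:, :k], groundtruth_ids[:, :k]):
-        hits += len(set(found.tolist()) & set(truth.tolist()))
-    return hits / (retrieved_ids.shape[0] * k)
-
-# Toy example: 2 queries, k=3.
-retrieved = np.array([[1, 2, 3], [7, 8, 9]])
-truth     = np.array([[1, 2, 4], [9, 8, 7]])
-r = recall_at_k(retrieved, truth, k=3)   # (2 + 3) / 6 ~= 0.83
-```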
- -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[393,528](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_msturing-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[127,883](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_msturing-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[110,886](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_msturing-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[14,862](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_msturing-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[2,630](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_msturing-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.0054](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_msturing-1B_power.png)** | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**[0.0061](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/private_msturing-1B_power.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.0099](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_msturing-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.0250](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_msturing-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.1874](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_msturing-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**$16,572.66** |$14,664.20|$1,908.46|$14,664.20 |1 |19,084.644 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**$152,142.96\*\*** |$150,000.00|$2,142.96|$150,000.00 |1 |21,429.628 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**$153,483.45\*\*** |$150,000.00|$3,483.45|$150,000.00 |1 |34,834.523 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**$398,832.13** |$390,086.62|$8,745.51|$55,726.66 |7 |87,455.147 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**$924,531.60** |$858,854.10|$65,677.50|$22,021.90 |39 |656,775.000 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. 
We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSSpace - -##### MSSpace Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------| -| 1|[-](-) |- |- |-|**-** | -| 2|[-](-) |- |- |-|**-** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[-](-) |- |- |-|**-** | -| 2|[-](-) |- |- |-|**-** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[-](-) |- |- |-|**-** | -| 2|[-](-) |- |- |-|**-** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
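-
-The W*S/Q column in the power rankings is energy per query in watt-seconds (joules): the average wall-power draw during a run, multiplied by the run's duration and divided by the number of queries served. A sketch of just that unit arithmetic, assuming the average draw is measured over the run:
-
-```python
-def watt_seconds_per_query(avg_power_watts, run_seconds, num_queries):
-    """Energy per query in watt-seconds (joules), i.e. the W*S/Q metric above."""
-    return avg_power_watts * run_seconds / num_queries
-
-# Example: a 550 W average draw over a 20 s run of 10,000 queries -> 1.1 W*s/query.
-example = watt_seconds_per_query(550.0, 20.0, 10_000)
-```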
- -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 2|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 3|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 4|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 5|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.63732](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_text2image-1B_recall.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.62692](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_text2image-1B_recall.png)** | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.61895](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_text2image-1B_recall.png)** | -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.60105](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_text2image-1B_recall.png)** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[20,765](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_text2image-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[10,364](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_text2image-1B_throughput.png)** | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[2,154](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_text2image-1B_throughput.png)** | -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[1,948](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_text2image-1B_throughput.png)** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.601 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.601 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**[0.0441](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/private_text2image-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**[0.0710](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/private_text2image-1B_power.png)**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.1091](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_text2image-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.2661](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_text2image-1B_power.png)**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.601 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.601 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. 
- * *final* = final submission and ranking. - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**$171,523.05** |$146,642.00|$24,881.05|$14,664.20 |10 |248,810.537| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**$765,461.90\*\*** |$750,000.00|$15,461.90|$150,000.00 |5 |154,619.021| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**$1,183,367.83** |$1,145,138.80|$38,229.03|$22,021.90 |52 |382,290.263| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**$2,712,384.33** |$2,619,153.02|$93,231.31|$55,726.66 |47 |932,313.075| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.601 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.601 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### FBSimSearchNet - -##### FBSimSearchNet AP Rankings - -|Rank|Submission |Team |Hardware |Status |AP | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.99672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_ssnpp-1B_recall.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.98567](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_ssnpp-1B_recall.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
-
-##### FBSimSearchNet Throughput Rankings
-
-|Rank|Submission |Team |Hardware |Status |Q/S |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[9,343](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_ssnpp-1B_throughput.png)** |
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[6,256](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_ssnpp-1B_throughput.png)** |
-| 3|[-](-) |- |- |-|**-** |
-| 4|[-](-) |- |- |-|**-** |
-| 5|[-](-) |- |- |-|**-** |
-| 6|[-](-) |- |- |-|**-** |
-
-* The operational point for ranking is 0.9 average precision. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
-  * *org* = submitted by challenge organizer, so subject to competition restrictions
-  * *eval* = final submissions are being evaluated.
-  * *final* = final submission and ranking.
-
-
-##### FBSimSearchNet Power Rankings
-
-|Rank|Submission |Team |Hardware |Status |W*S/Q |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**[0.0433](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/private_ssnpp-1B_power.png)**|
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**[0.0847](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/private_ssnpp-1B_power.png)**|
-| 3|[-](-) |- |- |-|**-**|
-| 4|[-](-) |- |- |-|**-**|
-| 5|[-](-) |- |- |-|**-**|
-| 6|[-](-) |- |- |-|**-**|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
-  * *org* = submitted by challenge organizer, so subject to competition restrictions
-  * *eval* = final submissions are being evaluated.
-  * *final* = final submission and ranking.
-
-##### FBSimSearchNet Cost Rankings
-
-|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs |
-|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------|
-| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |eval|**$382,013.99** |$352,350.40|$29,663.59|$22,021.90 |16 |296,635.913|
-| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |eval|**$628,176.55** |$612,993.26|$15,183.29|$55,726.66 |11 |151,832.906|
-| 3|[-](-)|- |- |-|**-** | -| -| -| -| -|
-| 4|[-](-)|- |- |-|**-** | -| -| -| -| -|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 average precision.
-* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. diff --git a/t3/LEADERBOARDS_PRIVATE_REJECT_ANOMALIES.md b/t3/LEADERBOARDS_PRIVATE_REJECT_ANOMALIES.md deleted file mode 100644 index 45c1130d6..000000000 --- a/t3/LEADERBOARDS_PRIVATE_REJECT_ANOMALIES.md +++ /dev/null @@ -1,566 +0,0 @@ -# T3 Track Private Dataset Leaderboards After Rejecting Anomalies - -Please note that all rankings and winners are unofficial until all [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) are resolved. - -## Rankings By Category - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |*NA* |*NA* |*NA* |*NA* |eval |*NA* |[Harsha Simhadri](https://github.com/harsha-simhadri) |- |- | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[5](#recall-or-ap-rankings) |[5](#throughput-rankings) |[5](#power-rankings) |[4](#cost-rankings) |eval |0/49 |[George Williams](https://github.com/sourcesync) |- |- | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[3](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[5](#cost-rankings) |eval |0/50 |[George Williams](https://github.com/sourcesync) |- |- | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[1](#recall-or-ap-rankings) |[3](#throughput-rankings) |[1](#power-rankings) |[2](#cost-rankings)\*\* |eval |[6/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |- |- | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[4](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |eval |[3/30](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |- |- | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) 
|[2](#power-rankings) |[1](#cost-rankings)|eval |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |- |-| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. In general, our analysis did not uncover this symptom of systematic query response caching from any submission. Also, if we throw out the anomalous data points, the adjusted leaderboard rankings (above) do not change even though some scores change slightly. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Benchmark - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**0.225**|0.99541 |0.99882 |0.99054 |- |0.63732 |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**0.224**|0.99872 |0.99977 |0.99561 |- |0.62692 |- | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**0.197**|0.99185 |0.99452 |0.97719 |- |0.61895 |0.99672 | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**0.094**|0.95788 |0.96978 |0.96186 |- |- |- | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|0.94437 |0.93604 |0.91513 |- |0.60105 |0.98567 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
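-
-The *Score* column above is the sum of benchmark improvements over the qualifying databases; the published values are consistent with summing, per database, the difference between the submission's number and the faiss_t3 baseline's number (so larger sums are better for recall and throughput, and more negative sums are better for power and cost). A sketch of that reading, using the recall figures from the table above:
-
-```python
-def benchmark_score(submission, baseline):
-    """Sum of per-database improvements over the baseline submission (faiss_t3)."""
-    return sum(value - baseline[db] for db, value in submission.items())
-
-# Recall example taken from the table above (baseline = faiss_t3):
-baseline = {"deep1B": 0.94437, "bigann": 0.93604, "msturing": 0.91513,
-            "text2image": 0.60105}
-cuanns_ivfpq = {"deep1B": 0.99541, "bigann": 0.99882, "msturing": 0.99054,
-                "text2image": 0.63732}
-score = benchmark_score(cuanns_ivfpq, baseline)   # ~0.225, matching the table
-```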
- -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**2116124**|968,840 |764,700 |393,528 |- |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**685763**|210,403 |350,007 |127,883 |- |10,364 |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**297608**|94,042 |84,806 |110,886 |- |20,765 |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**30425**|12,345 |10,868 |14,862 |- |2,154 |9,343 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|5,035 |3,279 |2,630 |- |1,948 |6,256 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**-0.475**|0.0104 |0.0119 |0.0099 |- |0.0441|-| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**-0.469**|0.0036 |0.0020 |0.0054 |- |0.0710|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**-0.431**|0.0025 |0.0022 |0.0061 |- |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**-0.232**|0.0352 |0.0342 |0.0250 |- |0.2661|0.0433| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|0.0923 |0.1623 |0.1874 |- |0.1091|0.0847| -| 6|[-](-)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. 
-* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |eval|**$-3,100,829.55**|$15,918.69 |$15,372.40 |$16,572.66 |- |$171,523.05 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |eval|**$-1,793,445.89\*\***|$303,647.91 |$304,177.20 |$153,483.45 |- |$765,461.90 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |eval|**$-1,683,055.85\*\***|$150,863.29 |$150,786.43 |$152,142.96 |- |- |- | -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |eval|**baseline**|$472,764.09 |$739,552.84 |$924,531.60 |- |$1,183,367.83 |$382,013.99 | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |eval|**$1,120,285.77**|$513,869.50 |$569,253.60 |$398,832.13 |- |$2,712,384.33 |$628,176.55 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
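-
-The *Anomalies* column in the submission table at the top of this leaderboard comes from the latency check described there: a query group is flagged when the batch latency improves by 30% or more between its first and last query, a pattern that could indicate query-response caching. A rough sketch of that check, reading "query group (5)" as a group of five repeated query batches and using illustrative timings:
-
-```python
-def flag_anomalies(latency_groups, threshold=0.30):
-    """Indices of query groups whose last batch ran at least `threshold`
-    (relatively) faster than their first batch."""
-    flagged = []
-    for i, group in enumerate(latency_groups):
-        first, last = group[0], group[-1]
-        if first > 0 and (first - last) / first >= threshold:
-            flagged.append(i)
-    return flagged
-
-# Example: the second group speeds up from 1.00 s to 0.55 s (45%) and is flagged.
-groups = [[0.90, 0.88, 0.91, 0.89, 0.90],
-          [1.00, 0.80, 0.70, 0.60, 0.55]]
-anomalous = flag_anomalies(groups)   # -> [1]
-```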
- -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.99872**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.99541**| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.99185**| -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**0.95788**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.94437**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**968,840**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**210,403**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**94,042**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**12,345**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**5,035**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**0.0025**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.0036**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.0104**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.0352**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.0923**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |eval|**$15,918.69** |$14,664.20|$1,254.49|$14,664.20 |1 |12,544.902| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |eval|**$150,863.29\*\*** |$150,000.00|$863.29|$150,000.00 |1 |8,632.870| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |eval|**$303,647.91\*\*** |$300,000.00|$3,647.91|$150,000.00 |2 |36,479.145| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |eval|**$472,764.09** |$440,438.00|$32,326.09|$22,021.90 |20 |323,260.873| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |eval|**$513,869.50** |$501,539.94|$12,329.56|$55,726.66 |9 |123,295.635| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. 
- * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.99977** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.99882** | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.99452** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**0.96978** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.93604** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**764,700** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**350,007** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**84,806** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**10,868** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**3,279** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.0020**| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**0.0022**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.0119**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.0342**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.1623**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |eval|**$15,372.40** |$14,664.20|$708.20|$14,664.20 |1 |7,082.016| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |eval|**$150,786.43\*\*** |$150,000.00|$786.43|$150,000.00 |1 |7,864.254| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |eval|**$304,177.20\*\*** |$300,000.00|$4,177.20|$150,000.00 |2 |41,771.962| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |eval|**$569,253.60** |$557,266.60|$11,987.00|$55,726.66 |10 |119,870.019| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |eval|**$739,552.84** |$682,678.90|$56,873.94|$22,021.90 |31 |568,739.415| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.99561** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.99054** | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.97719** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**0.96186** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.91513** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**393,528** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**127,883** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**110,886** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**14,862** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**2,630** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.0054** | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**0.0061** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.0099** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.0250** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.1874** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**$16,572.66** |$14,664.20|$1,908.46|$14,664.20 |1 |19,084.644 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |eval|**$152,142.96\*\*** |$150,000.00|$2,142.96|$150,000.00 |1 |21,429.628 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**$153,483.45\*\*** |$150,000.00|$3,483.45|$150,000.00 |1 |34,834.523 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**$398,832.13** |$390,086.62|$8,745.51|$55,726.66 |7 |87,455.147 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**$924,531.60** |$858,854.10|$65,677.50|$22,021.90 |39 |656,775.000 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSSpace - -##### MSSpace Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------| -| 1|[-](-) |- |- |-|**-** | -| 2|[-](-) |- |- |-|**-** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[-](-) |- |- |-|**-** | -| 2|[-](-) |- |- |-|**-** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[-](-) |- |- |-|**-** | -| 2|[-](-) |- |- |-|**-** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 2|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 3|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 4|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 5|[-](-)|- |- |-|**-** |-|-|- |- |- | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. 
of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.63732** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.62692** | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.61895** | -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.60105** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**20,765** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**10,364** | -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**2,154** | -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**1,948** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.601 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.601 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**0.0441**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**0.0710**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.1091**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.2661**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.601 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.601 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |eval|**$171,523.05** |$146,642.00|$24,881.05|$14,664.20 |10 |248,810.537| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |eval|**$765,461.90\*\*** |$750,000.00|$15,461.90|$150,000.00 |5 |154,619.021| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**$1,183,367.83** |$1,145,138.80|$38,229.03|$22,021.90 |52 |382,290.263| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**$2,712,384.33** |$2,619,153.02|$93,231.31|$55,726.66 |47 |932,313.075| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.601 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.601 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
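The capex/opex formula repeated in the cost-rankings sections above boils down to a few lines of arithmetic. The sketch below is illustrative only — it is not part of the benchmark harness, and the function name `t3_cost` and its arguments are made up — using the $0.10/KwH electricity price and 4-year, 100K qps workload stated in the bullets.

```python
import math

# Illustrative sketch of the cost formula in the bullets above -- not the official
# evaluation code; the function name and argument names here are hypothetical.
KWH_PRICE = 0.10                                        # $0.10 per KwH
TOTAL_QUERIES_4YRS = 4 * 365 * 24 * 60 * 60 * 100_000   # 4 years at 100K qps

def t3_cost(qps, watt_sec_per_query, unit_cost):
    units = math.ceil(100_000 / qps)                  # systems needed to reach 100K qps
    capex = units * unit_cost                         # cost per system * scale no.
    kwh_per_query = watt_sec_per_query / 3_600_000    # W*S/Q -> KwH per query
    kwh_4yrs = kwh_per_query * TOTAL_QUERIES_4YRS
    opex = kwh_4yrs * KWH_PRICE
    return {"units": units, "capex": capex, "KwH*4yrs": kwh_4yrs,
            "opex": opex, "total": capex + opex}

# Example inputs: the faiss_t3 BigANN figures from the public leaderboard deleted
# later in this diff (3,271 Q/S, 0.1576 W*S/Q, $22,021.90 per unit). This gives
# 31 units and $682,678.90 capex, with opex within rounding of the published
# $55,207.27 (the published power figure has more precision than shown).
print(t3_cost(qps=3_271, watt_sec_per_query=0.1576, unit_cost=22_021.90))
```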
- -#### FBSimSearchNet - -##### FBSimSearchNet AP Rankings - -|Rank|Submission |Team |Hardware |Status |AP | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.99672** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.98567** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest average precision for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### FBSimSearchNet Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**9,343** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**6,256** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 average precision. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - - -##### FBSimSearchNet Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |eval|**0.0433**| -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |eval|**0.0847**| -| 3|[-](-) |- |- |-|**-**| -| 4|[-](-) |- |- |-|**-**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### FBSimSearchNet Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |eval|**$382,013.99** |$352,350.40|$29,663.59|$22,021.90 |16 |296,635.913| -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |eval|**$628,176.55** |$612,993.26|$15,183.29|$55,726.66 |11 |151,832.906| -| 3|[-](-)|- |- |-|**-** | -| -| -| -| -| -| 4|[-](-)|- |- |-|**-** | -| -| -| -| -| - -* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 average precision. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. diff --git a/t3/LEADERBOARDS_PUBLIC.md b/t3/LEADERBOARDS_PUBLIC.md deleted file mode 100644 index ecc07019f..000000000 --- a/t3/LEADERBOARDS_PUBLIC.md +++ /dev/null @@ -1,566 +0,0 @@ -# T3 Track Public Dataset Leaderboards - -Please note that all rankings and winners are unofficial until all [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) are resolved.
- -## Rankings By Category - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |[1](#recall-or-ap-rankings) |[5](#throughput-rankings) |*NQ* |*NQ* |final |*NA* |[Harsha Simhadri](https://github.com/harsha-simhadri) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/diskann-t2.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/EvalPublic.ipynb) | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[6](#recall-or-ap-rankings) |[6](#throughput-rankings) |[5](#power-rankings) |[5](#cost-rankings) |final |0/58 |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/faiss_t3.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/EvalPublic.ipynb) | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[4](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[4](#cost-rankings) |final |0/60 |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/gemini.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/EvalPublic.ipynb) | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[3](#recall-or-ap-rankings) |[3](#throughput-rankings) |[2](#power-rankings) |[2](#cost-rankings)\*\* |final |[6/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_ivfpq.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/EvalPublic.ipynb) | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[5](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |final |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_multigpu.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/EvalPublic.ipynb) | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) |[1](#power-rankings) |[1](#cost-rankings)|final 
|[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/graphann.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/EvalPublic.ipynb)| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. In general, our analysis did not uncover this symptom of systematic query response caching from any submission. Also, if we throw out the anomalous data points, the [adjusted leaderboard rankings](LEADERBOARDS_PUBLIC_REJECT_ANOMALIES.md) do not change even though some scores change slightly. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Benchmark - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**0.410**|[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png) |[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png) |[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png) |[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png) |[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png) |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**0.409**|[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png) |[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png) |[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png) |[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png) 
|[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**0.368**|[0.99543](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png) |[0.99881](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png) |[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png) |[0.99429](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png) |[0.94692](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**0.339**|[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png) |[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png) |[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png) |[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png) |[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png) |[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png) | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**0.268**|[0.99504](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png) |[0.99815](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png) |[0.98399](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png) |[0.98785](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png) |- |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png) |[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png) |[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png) |[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png) |[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png) |[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png) | - -* A submission must support at least 3 databases to qualify for this ranking. 
-* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**2959313**|[801,694](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png) |[747,421](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png) |[584,293](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png) |[839,749](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png) |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**853257**|[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png) |[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png) |[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png) |[157,828](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png) |[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**393318**|[91,701](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png) |[80,109](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png) |[109,745](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png) |[108,302](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png) |[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**52429**|[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png) |[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png) 
|[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png) |[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png) |[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png) |[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png) | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**49398**|[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png) |[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png) |[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png) |[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png) |[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png) |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png) |[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png) |[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png) |[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png) |[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png) |[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png) | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
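The "sum of benchmark improvements" used for the scores above is not defined in code in this file, but the published numbers are consistent with summing, over each qualifying dataset, the difference between a submission's metric and the faiss_t3 baseline's metric. A minimal sketch under that assumption (the helper `ranking_score` is hypothetical, not the official evaluation script):

```python
# Hypothetical sketch of the ranking score: sum over qualifying datasets of
# (submission metric - faiss_t3 baseline metric).  For power and cost, lower is
# better, so those scores come out negative and are ranked ascending.
def ranking_score(submission: dict, baseline: dict) -> float:
    """Both dicts map dataset name -> metric (recall, Q/S, W*S/Q, or cost)."""
    common = set(submission) & set(baseline)
    if len(common) < 3:               # must support at least 3 databases to qualify
        raise ValueError("submission does not qualify for this ranking")
    return sum(submission[d] - baseline[d] for d in common)

# Throughput example using the Q/S figures from the table above.
cuanns_multigpu = {"deep-1B": 801_694, "bigann-1B": 747_421,
                   "msturing-1B": 584_293, "msspacev-1B": 839_749}
faiss_t3 = {"deep-1B": 4_464, "bigann-1B": 3_271,
            "msturing-1B": 2_845, "msspacev-1B": 3_265}
# 2,959,312 -- within rounding of the published score of 2,959,313.
print(ranking_score(cuanns_multigpu, faiss_t3))
```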
- -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|Intel|Intel Optane |final|**-0.648**|[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png) |[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png) |[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png) |[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png) |[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)|-| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|NVidia|NVidia GPU |final|**-0.619**|[0.0112](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png) |[0.0119](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png) |[0.0090](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png) |[0.0090](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png) |[0.0480](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**-0.583**|[0.0029](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png) |[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png) |[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png) |[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png) |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**-0.513**|[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png) |[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png) |[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png) |[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png) |[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)|[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)| -| 
5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png) |[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png) |[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png) |[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png) |[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)|[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)| -| 6|[-](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**$-3,978,198.83**|$16,086.82 |$15,439.92 |$16,347.45 |$16,382.81 |$103,599.49 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**$-2,314,829.98\*\***|$303,929.39 |$304,166.48 |$153,151.00 |$153,155.12 |$916,823.34 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**$-2,268,943.17\*\***|$151,009.85 |$150,824.13 |$151,726.30 |$150,816.00 |- |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**$-907,570.13**|$569,058.09 |$569,210.35 |$286,911.87 |$398,163.18 |$1,213,773.56 |$629,442.91 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|$545,633.16 |$737,886.17 |$853,857.46 |$735,942.66 |$1,272,735.86 |$428,074.79 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png)**| -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99543](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png)**| -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.99504](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png)**| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[801,694](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[91,701](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png)**| -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png)**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
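Each per-database ranking above is read off a single operational point from a submission's parameter sweep: the recall rankings take the best recall among runs at or above 2000 QPS, and the throughput rankings take the best QPS among runs at or above 0.90 recall@10. A small sketch of that selection, with made-up numbers purely for illustration (this is not the benchmark's plotting code):

```python
# Hypothetical sweep of (recall@10, queries/second) pairs, one per
# search-parameter setting; the values are invented for illustration only.
runs = [
    (0.889, 5_100), (0.903, 4_400), (0.931, 3_600), (0.957, 2_300), (0.971, 1_400),
]

# Recall ranking: highest recall among runs that meet or exceed 2000 QPS.
recall_point = max((r for r in runs if r[1] >= 2000), key=lambda r: r[0])

# Throughput ranking: highest QPS among runs that meet or exceed 0.90 recall@10.
throughput_point = max((r for r in runs if r[0] >= 0.90), key=lambda r: r[1])

print(recall_point)      # (0.957, 2300) -> the value reported in a recall table
print(throughput_point)  # (0.903, 4400) -> the value reported in a throughput table
```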
- -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0029](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0112](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,086.82** |$14,664.20|$1,422.62|$14,664.20 |1 |14,226.208| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$151,009.85\*\*** |$150,000.00|$1,009.85|$150,000.00 |1 |10,098.482| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$303,929.39\*\*** |$300,000.00|$3,929.39|$150,000.00 |2 |39,293.902| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$545,633.16** |$506,503.70|$39,129.46|$22,021.90 |23 |391,294.584| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,058.09** |$557,266.60|$11,791.49|$55,726.66 |10 |117,914.908| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. 
-* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99881](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png)** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.99815](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[747,421](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[80,109](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png)** | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png)**| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0119](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$15,439.92** |$14,664.20|$775.72|$14,664.20 |1 |7,757.221| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,824.13\*\*** |$150,000.00|$824.13|$150,000.00 |1 |8,241.343| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$304,166.48\*\*** |$300,000.00|$4,166.48|$150,000.00 |2 |41,664.844| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,210.35** |$557,266.60|$11,943.75|$55,726.66 |10 |119,437.537| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$737,886.17** |$682,678.90|$55,207.27|$22,021.90 |31 |552,072.703| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. 
-* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png)** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.98399](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
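For reference, R@10 in these recall tables is recall@10: the fraction of each query's true 10 nearest neighbors that appear in the returned top-10 list, averaged over the query set. A small illustrative sketch follows; the benchmark's own evaluation scripts are authoritative, and the arrays below are toy data:

```python
import numpy as np

def recall_at_10(retrieved_ids, groundtruth_ids):
    """Both arguments: integer arrays of shape (num_queries, 10) of neighbor ids."""
    hits = sum(
        len(set(ret[:10]) & set(gt[:10]))
        for ret, gt in zip(retrieved_ids, groundtruth_ids)
    )
    return hits / (len(groundtruth_ids) * 10)

# Toy example: query 0 recovers 4 of its 10 true neighbors, query 1 all 10.
gt = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
               [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
ret = np.array([[0, 1, 2, 3, 99, 98, 97, 96, 95, 94],
                [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
print(recall_at_10(ret, gt))  # (4 + 10) / 20 = 0.7
```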
- -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[584,293](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[109,745](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png)** | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0090](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$16,347.45** |$14,664.20|$1,683.25|$14,664.20 |1 |16,832.451 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**$151,726.30\*\*** |$150,000.00|$1,726.30|$150,000.00 |1 |17,262.993 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$153,151.00\*\*** |$150,000.00|$3,151.00|$150,000.00 |1 |31,509.973 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$286,911.87** |$278,633.30|$8,278.57|$55,726.66 |5 |82,785.683 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$853,857.46** |$792,788.40|$61,069.06|$22,021.90 |36 |610,690.611 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. 
We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSSpace - -##### MSSpace Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99429](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png)** | -| 4|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.98785](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[839,749](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[157,828](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[108,302](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png)** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0049](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0090](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,382.81** |$14,664.20|$1,718.61|$14,664.20 |1 |17,186.056 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,816.00\*\*** |$150,000.00|$816.00|$150,000.00 |1 |8,160.006 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$153,155.12\*\*** |$150,000.00|$3,155.12|$150,000.00 |1 |31,551.163 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$398,163.18** |$390,086.62|$8,076.56|$55,726.66 |7 |80,765.638 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$735,942.66** |$682,678.90|$53,263.76|$22,021.90 |31 |532,637.584 | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. 
-* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.94692](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.860 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.860 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0480](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.86 recall@10. 
We will use the lowest power consumption for the search parameters that meet or exceed 0.86 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$103,599.49** |$87,985.20|$15,614.29|$14,664.20 |6 |156,142.873| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$916,823.34\*\*** |$900,000.00|$16,823.34|$150,000.00 |6 |168,233.421| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$1,213,773.56** |$1,170,259.86|$43,513.70|$55,726.66 |21 |435,137.010| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,272,735.86** |$1,233,226.40|$39,509.46|$22,021.90 |56 |395,094.625| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.86 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### FBSimSearchNet - -##### FBSimSearchNet AP Rankings - -|Rank|Submission |Team |Hardware |Status |AP | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. 
We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### FBSimSearchNet Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png)** | -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png)** | -| 3|[-](-) |- |- |-|**-** | -| 4|[-](-) |- |- |-|**-** | -| 5|[-](-) |- |- |-|**-** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - - -##### FBSimSearchNet Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)**| -| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)**| -| 3|[-](-) |- |- |-|**-**| -| 4|[-](-) |- |- |-|**-**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### FBSimSearchNet Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$428,074.79** |$396,394.20|$31,680.59|$22,021.90 |18 |316,805.859| -| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$629,442.91** |$612,993.26|$16,449.65|$55,726.66 |11 |164,496.451| -| 3|[-](-)|- |- |-|**-** | -| -| -| -| -| -| 4|[-](-)|- |- |-|**-** | -| -| -| -| -| - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. diff --git a/t3/LEADERBOARDS_PUBLIC_REJECT_ANOMALIES.md b/t3/LEADERBOARDS_PUBLIC_REJECT_ANOMALIES.md deleted file mode 100644 index c36c071fe..000000000 --- a/t3/LEADERBOARDS_PUBLIC_REJECT_ANOMALIES.md +++ /dev/null @@ -1,566 +0,0 @@ -# T3 Track Public Dataset Leaderboards After Rejecting Anomalies - -Please note that all rankings and winners are unofficial until all [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) are resolved. 
- -## Rankings By Category - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |[1](#recall-or-ap-rankings) |[5](#throughput-rankings) |*NQ* |*NQ* |final |*NA* |[Harsha Simhadri](https://github.com/harsha-simhadri) |- |- | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[6](#recall-or-ap-rankings) |[6](#throughput-rankings) |[5](#power-rankings) |[5](#cost-rankings) |final |0/58 |[George Williams](https://github.com/sourcesync) |- |- | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[4](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[4](#cost-rankings) |final |0/60 |[George Williams](https://github.com/sourcesync) |- |- | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[3](#recall-or-ap-rankings) |[3](#throughput-rankings) |[2](#power-rankings) |[2](#cost-rankings)\*\* |final |[6/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |- |- | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[5](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |final |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |- |- | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) |[1](#power-rankings) |[1](#cost-rankings)|final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |[George Williams](https://github.com/sourcesync) |- |-| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. In general, our analysis did not uncover this symptom of systematic query response caching from any submission. Also, if we throw out the anomalous data points, the adjusted leaderboard rankings (above) do not change even though some scores change slightly. 
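As a rough illustration of the anomaly screen described in the bullet above, the sketch below flags any group of 5 consecutive batch queries whose last query is at least 30% faster than its first. The evaluator's actual scripts are the reference; the latencies here are made up:

```python
# Rough sketch of the anomaly screen: a run is flagged if the last query in a
# group of 5 consecutive batch queries is 30% or more faster than the first,
# which could indicate query-response caching.

def flag_anomalies(latencies, group_size=5, threshold=0.30):
    """latencies: per-batch-query latencies, in submission order."""
    anomalies = []
    for start in range(0, len(latencies) - group_size + 1, group_size):
        first, last = latencies[start], latencies[start + group_size - 1]
        improvement = (first - last) / first
        if improvement >= threshold:
            anomalies.append((start, improvement))
    return anomalies

runs = [1.00, 0.98, 1.01, 0.99, 1.00,   # group 1: roughly flat, not flagged
        1.00, 0.95, 0.80, 0.70, 0.60]   # group 2: 40% faster by the last query
print(flag_anomalies(runs))  # [(5, 0.4)] -- the group starting at index 5 is flagged
```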
- -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Benchmark - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**0.410**|0.99821 |0.99976 |0.99444 |0.99342 |0.98130 |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**0.409**|0.99882 |0.99978 |0.99568 |0.99835 |0.97340 |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**0.368**|0.99543 |0.99881 |0.98993 |0.99429 |0.94692 |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**0.339**|0.99208 |0.99328 |0.97841 |0.98622 |0.92855 |0.99684 | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**0.166**|0.95736 |0.96750 |0.96286 |0.97541 |- |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|0.94275 |0.93260 |0.91322 |0.90853 |0.86028 |0.97863 | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
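The per-dataset columns make the "sum of benchmark improvements" concrete. Assuming "improvement" means the submission's result minus the faiss_t3 baseline's result on the same dataset (an interpretation of the wording above, not an official formula, though it reproduces the published numbers), diskann's recall score works out as follows:

```python
# Assumed scoring rule: score = sum over the submission's qualifying datasets
# of (submission's result - faiss_t3 baseline's result). Plugging in the
# recall columns above reproduces diskann's published 0.410.

baseline = {"deep1b": 0.94275, "bigann": 0.93260, "msturing": 0.91322,
            "msspace": 0.90853, "text2image": 0.86028}
diskann  = {"deep1b": 0.99821, "bigann": 0.99976, "msturing": 0.99444,
            "msspace": 0.99342, "text2image": 0.98130}

def score(submission, baseline):
    """Sum of improvements over the baseline on the datasets the submission ran."""
    return sum(submission[db] - baseline[db] for db in submission)

print(f"{score(diskann, baseline):.3f}")  # 0.410, matching the Score column
```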
- -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**2959313**|801,694 |747,421 |584,293 |839,749 |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**851327**|196,546 |335,991 |161,463 |155,899 |17,063 |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**393318**|91,701 |80,109 |109,745 |108,302 |19,094 |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**52429**|10,704 |10,672 |21,780 |16,422 |4,838 |9,345 | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**49398**|12,927 |19,094 |17,201 |6,503 |9,307 |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|4,464 |3,271 |2,845 |3,265 |1,789 |5,699 | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**-0.648**|0.0041 |0.0022 |0.0048 |0.0050 |0.0446|-| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**-0.619**|0.0112 |0.0119 |0.0090 |0.0090 |0.0480|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**-0.583**|0.0029 |0.0024 |0.0049 |0.0023 |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**-0.513**|0.0337 |0.0341 |0.0236 |0.0230 |0.1242|0.0469| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|0.1117 |0.1576 |0.1743 |0.1520 |0.1128|0.0904| -| 6|[-](-)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking.
- -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**$-3,978,172.56**|$16,086.82 |$15,439.92 |$16,347.45 |$16,409.08 |$103,599.49 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**$-2,314,829.98\*\***|$303,929.39 |$304,166.48 |$153,151.00 |$153,155.12 |$916,823.34 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**$-2,268,943.17\*\***|$151,009.85 |$150,824.13 |$151,726.30 |$150,816.00 |- |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**$-907,570.13**|$569,058.09 |$569,210.35 |$286,911.87 |$398,163.18 |$1,213,773.56 |$629,442.91 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|$545,633.16 |$737,886.17 |$853,857.46 |$735,942.66 |$1,272,735.86 |$428,074.79 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.99882**| -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**0.99821**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.99543**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.99208**| -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.95736**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.94275**| - -* The operational point for ranking is 2000 QPS. 
We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**801,694**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**196,546**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**91,701**| -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**12,927**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**10,704**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**4,464**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.0029**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.0041**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.0112**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.0337**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.1117**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
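The W*S/Q figures in these power tables are watt-seconds per query, i.e. energy per query. A rough sketch of what such a number represents (an assumption about the bookkeeping, not the official power-capture pipeline):

```python
# Illustration only: energy per query as average wall power during a run,
# times the run's wall-clock time, divided by the number of queries served.

def watt_seconds_per_query(avg_power_watts, run_seconds, num_queries):
    return avg_power_watts * run_seconds / num_queries

# e.g. a hypothetical 700 W system answering 100,000 queries in 30 s
print(watt_seconds_per_query(700, 30, 100_000))  # 0.21 W*s/query
```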
- -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,086.82** |$14,664.20|$1,422.62|$14,664.20 |1 |14,226.208| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$151,009.85\*\*** |$150,000.00|$1,009.85|$150,000.00 |1 |10,098.482| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$303,929.39\*\*** |$300,000.00|$3,929.39|$150,000.00 |2 |39,293.902| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$545,633.16** |$506,503.70|$39,129.46|$22,021.90 |23 |391,294.584| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,058.09** |$557,266.60|$11,791.49|$55,726.66 |10 |117,914.908| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
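The bullets above give the cost formula in words; a minimal sketch of the same arithmetic follows. It is only an illustration (not the official cost spreadsheet), and it reproduces the faiss_t3 Deep1B row to within rounding of the published 0.1117 W*s/query input:

```python
import math

ELECTRICITY_USD_PER_KWH = 0.10
TARGET_QPS = 100_000
TOTAL_QUERIES = 4 * 365 * 24 * 60 * 60 * TARGET_QPS  # queries over 4 years at 100K qps

def cost_breakdown(unit_cost_usd, qps, watt_seconds_per_query):
    units = math.ceil(TARGET_QPS / qps)                  # systems needed to reach 100K qps
    capex = units * unit_cost_usd
    kwh_per_query = watt_seconds_per_query / 3_600_000   # W*s -> kWh
    kwh_4yrs = TOTAL_QUERIES * kwh_per_query
    opex = kwh_4yrs * ELECTRICITY_USD_PER_KWH
    return {"units": units, "capex": capex, "kwh_4yrs": kwh_4yrs,
            "opex": opex, "total": capex + opex}

# faiss_t3 on Deep1B: $22,021.90 per system, 4,464 Q/S, 0.1117 W*s/query.
print(cost_breakdown(22_021.90, 4_464, 0.1117))
# -> 23 systems, capex $506,503.70, ~391,400 kWh, opex ~$39,140, total ~$545,640
```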
- -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.99978** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**0.99976** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.99881** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.99328** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.96750** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.93260** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**747,421** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**335,991** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**80,109** | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**19,094** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**10,672** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**3,271** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.0022**| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.0024**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.0119**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.0341**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.1576**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$15,439.92** |$14,664.20|$775.72|$14,664.20 |1 |7,757.221| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,824.13\*\*** |$150,000.00|$824.13|$150,000.00 |1 |8,241.343| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$304,166.48\*\*** |$300,000.00|$4,166.48|$150,000.00 |2 |41,664.844| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,210.35** |$557,266.60|$11,943.75|$55,726.66 |10 |119,437.537| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$737,886.17** |$682,678.90|$55,207.27|$22,021.90 |31 |552,072.703| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.99568** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**0.99444** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.98993** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.97841** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.96286** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.91322** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**584,293** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**161,463** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**109,745** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**21,780** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**17,201** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**2,845** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.0048** | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.0049** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.0090** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.0236** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.1743** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$16,347.45** |$14,664.20|$1,683.25|$14,664.20 |1 |16,832.451 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**$151,726.30\*\*** |$150,000.00|$1,726.30|$150,000.00 |1 |17,262.993 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$153,151.00\*\*** |$150,000.00|$3,151.00|$150,000.00 |1 |31,509.973 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$286,911.87** |$278,633.30|$8,278.57|$55,726.66 |5 |82,785.683 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$853,857.46** |$792,788.40|$61,069.06|$22,021.90 |36 |610,690.611 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### MSSpace - -##### MSSpace Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.99835** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.99429** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**0.99342** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.98622** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.97541** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.90853** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**839,749** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**155,899** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**108,302** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**16,422** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**6,503** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**3,265** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**0.0023** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.0050** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.0090** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.0230** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.1520** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,409.08** |$14,664.20|$1,744.88|$14,664.20 |1 |17,448.764 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,816.00\*\*** |$150,000.00|$816.00|$150,000.00 |1 |8,160.006 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$153,155.12\*\*** |$150,000.00|$3,155.12|$150,000.00 |1 |31,551.163 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$398,163.18** |$390,086.62|$8,076.56|$55,726.66 |7 |80,765.638 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$735,942.66** |$682,678.90|$53,263.76|$22,021.90 |31 |532,637.584 | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. 
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**0.98130** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.97340** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.94692** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.92855** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.86028** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**19,094** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**17,063** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**9,307** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**4,838** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**1,789** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.860 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.860 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. 
- -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**0.0446**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**0.0480**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.1128**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.1242**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.86 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$103,599.49** |$87,985.20|$15,614.29|$14,664.20 |6 |156,142.873| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$916,823.34\*\*** |$900,000.00|$16,823.34|$150,000.00 |6 |168,233.421| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$1,213,773.56** |$1,170,259.86|$43,513.70|$55,726.66 |21 |435,137.010| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,272,735.86** |$1,233,226.40|$39,509.46|$22,021.90 |56 |395,094.625| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.86 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *eval* = final submissions are being evaluated. - * *final* = final submission and ranking. -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
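
The cost figure in each Cost Rankings table follows the capex/opex recipe listed under those tables. A minimal sketch of that arithmetic is given below; the function name, constants layout, and the example inputs are illustrative assumptions, not the evaluation code.

```python
import math

# Sketch only: mirrors the cost bullets under the Cost Rankings tables.
# Example inputs are assumptions for illustration.
KWH_PRICE = 0.10                      # $ per KwH, as stated in the formula
TARGET_QPS = 100_000                  # scale-out target
TOTAL_QUERIES_4Y = 4 * 365 * 24 * 60 * 60 * TARGET_QPS

def leaderboard_cost(unit_cost_usd, qps, watt_seconds_per_query):
    """Capex plus 4-year opex at 100K QPS."""
    units = math.ceil(TARGET_QPS / qps)                    # systems needed to reach 100K QPS
    capex = unit_cost_usd * units
    kwh_per_query = watt_seconds_per_query / 3_600_000.0   # W*s per query -> KwH per query
    opex = TOTAL_QUERIES_4Y * kwh_per_query * KWH_PRICE
    return capex + opex

# Hypothetical system: $15,000 per unit, 200,000 QPS, 0.004 W*s per query.
print(round(leaderboard_cost(15_000, 200_000, 0.004), 2))  # ~16401.60
```
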
-
-#### FBSimSearchNet
-
-##### FBSimSearchNet AP Rankings
-
-|Rank|Submission |Team |Hardware |Status |AP |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.99684** |
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.97863** |
-| 3|[-](-) |- |- |-|**-** |
-| 4|[-](-) |- |- |-|**-** |
-| 5|[-](-) |- |- |-|**-** |
-| 6|[-](-) |- |- |-|**-** |
-
-* The operational point for ranking is 2000 QPS. We will use the highest average precision for the search parameters that meet or exceed 2000 QPS.
-* Abbreviations used in chart:
-  * *org* = submitted by challenge organizer, so subject to competition restrictions
-  * *eval* = final submissions are being evaluated.
-  * *final* = final submission and ranking.
-
-##### FBSimSearchNet Throughput Rankings
-
-|Rank|Submission |Team |Hardware |Status |Q/S |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**9,345** |
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**5,699** |
-| 3|[-](-) |- |- |-|**-** |
-| 4|[-](-) |- |- |-|**-** |
-| 5|[-](-) |- |- |-|**-** |
-| 6|[-](-) |- |- |-|**-** |
-
-* The operational point for ranking is 0.9 average precision. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
-  * *org* = submitted by challenge organizer, so subject to competition restrictions
-  * *eval* = final submissions are being evaluated.
-  * *final* = final submission and ranking.
-
-
-##### FBSimSearchNet Power Rankings
-
-|Rank|Submission |Team |Hardware |Status |W*S/Q |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**0.0469**|
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**0.0904**|
-| 3|[-](-) |- |- |-|**-**|
-| 4|[-](-) |- |- |-|**-**|
-| 5|[-](-) |- |- |-|**-**|
-| 6|[-](-) |- |- |-|**-**|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
-  * *org* = submitted by challenge organizer, so subject to competition restrictions
-  * *eval* = final submissions are being evaluated.
-  * *final* = final submission and ranking.
-
-##### FBSimSearchNet Cost Rankings
-
-|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs |
-|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------|
-| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$428,074.79** |$396,394.20|$31,680.59|$22,021.90 |18 |316,805.859|
-| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$629,442.91** |$612,993.26|$16,449.65|$55,726.66 |11 |164,496.451|
-| 3|[-](-)|- |- |-|**-** |-|-|- |- |-|
-| 4|[-](-)|- |- |-|**-** |-|-|- |- |-|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 average precision.
-* The formula is based on:
-  * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.)
-  * Capex = cost per system * scale no.
-  * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q.
-  * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years.
-  * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH
-* Abbreviations used in chart:
-  * *org* = submitted by challenge organizer, so subject to competition restrictions
-  * *eval* = final submissions are being evaluated.
-  * *final* = final submission and ranking.
-* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending.
diff --git a/t3/LEADERBOARDS_REJECT_ANOMALIES.md b/t3/LEADERBOARDS_REJECT_ANOMALIES.md
deleted file mode 100644
index ab8ce9cef..000000000
--- a/t3/LEADERBOARDS_REJECT_ANOMALIES.md
+++ /dev/null
@@ -1,571 +0,0 @@
-# T3 Track Leaderboards After Rejecting Anomalies (Unofficial)
-
-Please note that all rankings are currently unofficial due to the following:
-* All [open tasks and issues](TASKS_ISSUES_RESOLUTIONS.md) must be resolved.
- -## Final Rankings On Private Query Set - -*Not yet available* - -## Rankings On Public Query Set - -### Rankings By Submission Name (alphabetical) - -|Submission |Team |Hardware |[Recall Rank](#recall-or-ap-rankings)|[Thru-put Rank](#throughput-rankings)|[Power Rank](#power-rankings)|[Cost Rank](#cost-rankings)|Status |Anomalies|Evaluator|Algo |Runs | -|--------------------|-----------|----------|---------|---------|---------|--------|---------|---------|---------|---------|--------| -|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |[1](#recall-or-ap-rankings) |[5](#throughput-rankings) |*NQ* |*NQ* |final |*NA* |Harsha Simhadri |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/diskann-t2.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/EvalPublic.ipynb) | -|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |[6](#recall-or-ap-rankings) |[6](#throughput-rankings) |[5](#power-rankings) |[5](#cost-rankings) |final |0/58 |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/faiss_t3.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/EvalPublic.ipynb) | -|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |[4](#recall-or-ap-rankings) |[4](#throughput-rankings) |[4](#power-rankings) |[4](#cost-rankings) |final |0/60 |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/gemini.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/EvalPublic.ipynb) | -|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |[3](#recall-or-ap-rankings) |[3](#throughput-rankings) |[1](#power-rankings) |[2](#cost-rankings)\*\* |final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_ivfpq.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/EvalPublic.ipynb) | -|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |[5](#recall-or-ap-rankings) |[1](#throughput-rankings) |[3](#power-rankings) |[3](#cost-rankings)\*\* |final |[4/40](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/ANOMALIES.md) |George Williams |[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/cuanns_multigpu.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/EvalPublic.ipynb) | -|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |[2](#recall-or-ap-rankings) |[2](#throughput-rankings) |[2](#power-rankings) |[1](#cost-rankings)|final |[5/50](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/ANOMALIES.md) |George Williams 
|[src](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/benchmark/algorithms/graphann.py) |[nb](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/EvalPublic.ipynb)| - -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - * *NQ* = not qualified - * *NA* = data is not yet available, or has not yet been processed - -* *Anomalies* are defined as queries that could potentially be the result of query response caching, a violation of the competition. Our detection method looks for a 30% or more improvement in the batch query latency between the first and last query of a query group (5). Participants have been given a chance to explain why detected anomalies (if any) are not a result of query response caching. - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Metric - -#### Recall Or AP Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-recall-rankings)|[BigANN](#bigann-recall-rankings)|[MSTuring](#msturing-recall-rankings)|[MSSpace](#msspace-recall-rankings)|[Text2Image](#text2image-recall-rankings)|[FBSSNet](#fbsimsearchnet-ap-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|-------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**0.410**|[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png) |[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png) |[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png) |[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png) |[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png) |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**0.409**|[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png) |[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png) |[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png) |[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png) |[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**0.368**|[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png) |[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png) 
|[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png) |[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png) |[0.94691](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**0.339**|[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png) |[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png) |[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png) |[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png) |[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png) |[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png) | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**0.166**|[0.95736](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png) |[0.96750](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png) |[0.96286](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png) |[0.97541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png) |- |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png) |[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png) |[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png) |[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png) |[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png) |[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png) | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Throughput Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-throughput-rankings)|[BigANN](#bigann-throughput-rankings)|[MSTuring](#msturing-throughput-rankings)|[MSSpace](#msspace-throughput-rankings)|[Text2Image](#text2image-throughput-rankings)|[FBSSNet](#fbsimsearchnet-throughput-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**3001623.821**|[816,807](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png) |[767,653](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png) |[586,722](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png) |[844,287](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png) |- |- | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**851327.044**|[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png) |[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png) |[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png) |[155,899](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png) |[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png) |- | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**401541.475**|[91,938](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png) |[85,446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png) |[110,830](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png) |[109,621](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png) |[19,340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png) |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**52429.395**|[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png) |[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png) |[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png) 
|[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png) |[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png) |[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png) | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Microsoft Research India(*org*)|Dell PowerEdge |final|**49398.127**|[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png) |[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png) |[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png) |[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png) |[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png) |- | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png) |[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png) |[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png) |[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png) |[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png) |[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png) | - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Power Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-power-rankings)|[BigANN](#bigann-power-rankings)|[MSTuring](#msturing-power-rankings)|[MSSpace](#msspace-power-rankings)|[Text2Image](#text2image-power-rankings)|[FBSSNet](#fbsimsearchnet-power-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|-----|-----| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**-0.691**|[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png) |[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png) |[0.0016](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png) |[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png) |[0.0094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)|-| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**-0.648**|[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png) |[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png) |[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png) |[0.0050](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png) |[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)|-| -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**-0.594**|[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png) |[0.0003](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png) |[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png) |[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png) |-|-| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**-0.513**|[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png) |[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png) |[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png) |[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png) 
|[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)|[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png) |[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png) |[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png) |[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png) |[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)|[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)| -| 6|[-](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|-|- |-|**-**|- |- |- |- |-|-| - -* A submission must support at least 3 databases to qualify for this ranking. -* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -#### Cost Rankings - -|Rank|Submission |Team |Hardware|Status |Score |[Deep1B](#deep1B-cost-rankings)|[BigANN](#bigann-cost-rankings)|[MSTuring](#msturing-cost-rankings)|[MSSpace](#msspace-cost-rankings)|[Text2Image](#text2image-cost-rankings)|[FBSSNet](#fbsimsearchnet-cost-rankings)| -|----|------------------|-------|--------|-------|-----------|------|------|--------|-------|----------|--------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel|Intel Optane |final|**$-3,978,172.56**|$16,086.82 |$15,439.92 |$16,347.45 |$16,409.08 |$103,599.49 |- | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia|NVidia GPU |final|**$-2,339,919.09\*\***|$300,843.83 |$300,815.92 |$150,563.49 |$150,605.68 |$903,307.30 |- | -| 3|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia|NVidia GPU |final|**$-2,272,942.67\*\***|$150,082.04 |$150,088.58 |$150,127.39 |$150,078.78 |- |- | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*)|LedaE APU |final|**$-907,570.13**|$569,058.09 |$569,210.35 |$286,911.87 |$398,163.18 |$1,213,773.56 |$629,442.91 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*)|NVidia GPU |final|**baseline**|$545,633.16 |$737,886.17 |$853,857.46 |$735,942.66 |$1,272,735.86 |$428,074.79 | -| 6|[-](-)|-|- |-|**-**|- |- |- |- |- |- | - -* A submission must support at least 3 databases to qualify for this ranking. 
-* The ranking is based on the score, which is the sum of benchmark improvements of qualifying databases (shown in specific database columns after the score column.) -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. - -### Rankings Per Database - -#### Deep1B - -##### Deep1B Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_recall.png)**| -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99821](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_recall.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_recall.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99208](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_recall.png)**| -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.95736](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_recall.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.94275](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_recall.png)**| - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[816,807](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_throughput.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[196,546](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_throughput.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[91,938](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_throughput.png)**| -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[12,927](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/deep-1B_throughput.png)**| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,704](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_throughput.png)**| -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[4,464](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_throughput.png)**| - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/deep-1B_power.png)**| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0024](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/deep-1B_power.png)**| -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0041](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/deep-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0337](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/deep-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1117](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/deep-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Deep1B Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|---------|-----------------------|--------|-------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,086.82** |$14,664.20|$1,422.62|$14,664.20 |1 |14,226.208| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,082.04\*\*** |$150,000.00|$82.04|$150,000.00 |1 |820.405| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$300,843.83\*\*** |$300,000.00|$843.83|$150,000.00 |2 |8,438.315| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$545,633.16** |$506,503.70|$39,129.46|$22,021.90 |23 |391,294.584| -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,058.09** |$557,266.60|$11,791.49|$55,726.66 |10 |117,914.908| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
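
Per-database values like the Deep1B numbers above roll up into the *score* column of the Rankings Per Metric tables earlier in this file, described there as the sum of benchmark improvements of qualifying databases relative to the row marked *baseline*. A minimal sketch of that aggregation follows, under the assumption that "improvement" means the submission's value minus the baseline's value on each database the submission ran; the dictionaries below are illustrative, not leaderboard data.

```python
# Sketch only: one reading of the per-metric "score" as a sum of improvements
# over the baseline submission. Dataset names and values below are made up.

def leaderboard_score(submission, baseline):
    """Sum of (submission - baseline) over the databases the submission ran."""
    return sum(value - baseline[db] for db, value in submission.items() if db in baseline)

baseline_recall = {"deep-1B": 0.94, "bigann-1B": 0.93, "msturing-1B": 0.91}
candidate_recall = {"deep-1B": 0.99, "bigann-1B": 0.99, "msturing-1B": 0.99}
print(round(leaderboard_score(candidate_recall, baseline_recall), 3))  # 0.19
```
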
- -#### BigANN - -##### BigANN Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99978](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99976](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99882](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99328](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_recall.png)** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.96750](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.93260](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[767,653](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[335,991](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[85,446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_throughput.png)** | -| 4|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[19,094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/bigann-1B_throughput.png)** | -| 5|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[10,672](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,271](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0003](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/bigann-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0022](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/bigann-1B_power.png)**| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0023](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/bigann-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0341](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/bigann-1B_power.png)**| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1576](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/bigann-1B_power.png)**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### BigANN Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|--------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$15,439.92** |$14,664.20|$775.72|$14,664.20 |1 |7,757.221| -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,088.58\*\*** |$150,000.00|$88.58|$150,000.00 |1 |885.770| -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$300,815.92\*\*** |$300,000.00|$815.92|$150,000.00 |2 |8,159.226| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$569,210.35** |$557,266.60|$11,943.75|$55,726.66 |10 |119,437.537| -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$737,886.17** |$682,678.90|$55,207.27|$22,021.90 |31 |552,072.703| -| 6|[-](-)|- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
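-
-The recall, throughput, and power rankings above all reduce an algorithm's (up to 10) search parameter sets to a single operating point. A minimal sketch of that selection logic is shown below; it assumes each run is a dict with `qps`, `recall` (or average precision), and `wspq` keys, which is our naming rather than the framework's.
-
-```
-def best_recall_at_qps(runs, min_qps=2000.0):
-    """Recall leaderboard: highest recall among parameter sets at or above the QPS floor."""
-    ok = [r for r in runs if r["qps"] >= min_qps]
-    return max(ok, key=lambda r: r["recall"], default=None)
-
-def best_qps_at_recall(runs, min_recall=0.90):
-    """Throughput leaderboard: highest QPS among parameter sets at or above the recall floor."""
-    ok = [r for r in runs if r["recall"] >= min_recall]
-    return max(ok, key=lambda r: r["qps"], default=None)
-
-def lowest_power_at_recall(runs, min_recall=0.90):
-    """Power leaderboard: lowest watt-seconds/query among parameter sets at or above the recall floor."""
-    ok = [r for r in runs if r["recall"] >= min_recall]
-    return min(ok, key=lambda r: r["wspq"], default=None)
-```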
- -#### MSTuring - -##### MSTuring Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99568](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_recall.png)** | -| 2|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99444](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.98993](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.97841](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_recall.png)** | -| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.96286](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_recall.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.91322](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_recall.png)** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[586,722](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[161,463](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[110,830](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[21,780](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[17,201](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msturing-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[2,845](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_throughput.png)** | - -* The operational point for ranking is 0.90 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Power Rankings - -|Rank|Submission |Team |Hardware |Status|W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|------|--------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0004](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msturing-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0016](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msturing-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0048](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msturing-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0236](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msturing-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1743](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msturing-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.90 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSTuring Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|------------------------------------|------------------------------|-----------------------|--------|--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$16,347.45** |$14,664.20|$1,683.25|$14,664.20 |1 |16,832.451 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**$150,127.39\*\*** |$150,000.00|$127.39|$150,000.00 |1 |1,273.870 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$150,563.49\*\*** |$150,000.00|$563.49|$150,000.00 |1 |5,634.885 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$286,911.87** |$278,633.30|$8,278.57|$55,726.66 |5 |82,785.683 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$853,857.46** |$792,788.40|$61,069.06|$22,021.90 |36 |610,690.611 | -| 6|[-](-) |- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
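-
-For reference, the W*S/Q numbers in the power tables come from wall-power samples captured over the IPMI interface while the query set is run repeatedly for at least 10 seconds (see the power leaderboard description in the T3 README further below). A rough sketch of that reduction, with hypothetical variable names:
-
-```
-def watt_seconds_per_query(power_samples_watts, run_seconds, total_queries):
-    """Average wall power over the timed run, times its duration, divided by queries served."""
-    avg_watts = sum(power_samples_watts) / len(power_samples_watts)
-    return avg_watts * run_seconds / total_queries  # watt-seconds per query
-```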
-
-#### MSSpace
-
-##### MSSpace Recall Rankings
-
-|Rank|Submission |Team |Hardware |Status |R@10 |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|---------|
-| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.99835](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_recall.png)** |
-| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.99428](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_recall.png)** |
-| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.99342](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_recall.png)** |
-| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.98622](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_recall.png)** |
-| 5|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.97541](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_recall.png)** |
-| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.90853](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_recall.png)** |
-
-* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS.
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[844,287](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[155,899](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_throughput.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[109,621](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[16,422](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_throughput.png)** | -| 5|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[6,503](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/msspacev-1B_throughput.png)** | -| 6|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[3,265](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_throughput.png)** | - -* The operational point for ranking is 0.9 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md) |NVidia |NVidia GPU |final|**[0.0002](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_multigpu/msspacev-1B_power.png)** | -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0017](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/msspacev-1B_power.png)** | -| 3|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0050](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/msspacev-1B_power.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0230](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/msspacev-1B_power.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1520](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/msspacev-1B_power.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.9 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### MSSpace Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|--------------------|------------------------------|-----------------------|------- |--------------|--------|--------|---------|--------------|----------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md)|Intel |Intel Optane |final|**$16,409.08** |$14,664.20|$1,744.88|$14,664.20 |1 |17,448.764 | -| 2|[cuanns_multigpu](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_multigpu/README.md)|NVidia |NVidia GPU |final|**$150,078.78\*\*** |$150,000.00|$78.78|$150,000.00 |1 |787.774 | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md)|NVidia |NVidia GPU |final|**$150,605.68\*\*** |$150,000.00|$605.68|$150,000.00 |1 |6,056.841 | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$398,163.18** |$390,086.62|$8,076.56|$55,726.66 |7 |80,765.638 | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$735,942.66** |$682,678.90|$53,263.76|$22,021.90 |31 |532,637.584 | -| 6|[-](-)|- |- |-|**-** |-|-|- |- |- | - -* The operational point for ranking is 0.90 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.90 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) - * Capex = cost per system * scale no. - * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q. - * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years. - * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress -* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending. 
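-
-As a concrete instance of the scaling step in the cost formula above (our arithmetic, using numbers already shown in these tables): faiss_t3's MSSpace throughput entry is 3,265 QPS, so ceil(100,000 / 3,265) = 31 systems are needed to reach 100K qps, and 31 x $22,021.90 per system gives the $682,678.90 capex listed in the cost table.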
- -#### Text2Image - -##### Text2Image Recall Rankings - -|Rank|Submission |Team |Hardware |Status |R@10 | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------| -| 1|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[0.98130](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_recall.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.97340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_recall.png)** | -| 3|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.94691](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_recall.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.92855](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_recall.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.86028](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_recall.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 2000 QPS. We will use the highest recall for the search parameters that meet or exceed 2000 QPS. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Throughput Rankings - -|Rank|Submission |Team |Hardware |Status |Q/S | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[19,340](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_throughput.png)** | -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[17,063](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_throughput.png)** | -| 3|[diskann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/diskann-bare-metal/README.md) |Microsoft Research India(*org*) |Dell PowerEdge |final|**[9,307](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/diskann-bare-metal/text2image-1B_throughput.png)** | -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[4,838](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_throughput.png)** | -| 5|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[1,789](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_throughput.png)** | -| 6|[-](-) |- |- |-|**-** | - -* The operational point for ranking is 0.860 recall@10. We will use the highest throughput for the search parameters that meet or exceed 0.860 recall@10. 
-* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Power Rankings - -|Rank|Submission |Team |Hardware |Status |W*S/Q | -|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------| -| 1|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**[0.0094](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/cuanns_ivfpq/text2image-1B_power.png)**| -| 2|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**[0.0446](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/optanne_graphann/text2image-1B_power.png)**| -| 3|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.1128](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/text2image-1B_power.png)**| -| 4|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.1242](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/text2image-1B_power.png)**| -| 5|[-](-) |- |- |-|**-**| -| 6|[-](-) |- |- |-|**-**| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption for the search parameters that meet or exceed 0.86 recall@10. -* Abbreviations used in chart: - * *org* = submitted by challenge organizer, so subject to competition restrictions - * *final* = final submission - * *inprog* = algorithm development still in progress - -##### Text2Image Cost Rankings - -|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs | -|----|---------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------| -| 1|[optanne_graphann](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/optanne_graphann/README.md) |Intel |Intel Optane |final|**$103,599.49** |$87,985.20|$15,614.29|$14,664.20 |6 |156,142.873| -| 2|[cuanns_ivfpq](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/cuanns_ivfpq/README.md) |NVidia |NVidia GPU |final|**$903,307.30\*\*** |$900,000.00|$3,307.30|$150,000.00 |6 |33,072.963| -| 3|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**$1,213,773.56** |$1,170,259.86|$43,513.70|$55,726.66 |21 |435,137.010| -| 4|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**$1,272,735.86** |$1,233,226.40|$39,509.46|$22,021.90 |56 |395,094.625| -| 5|[-](-) |- |- |-|**-** |-|-|- |- |-| -| 6|[-](-) |- |- |-|**-** |-|-|- |- |-| - -* The operational point for ranking is 0.86 recall@10. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.86 recall@10. -* The formula is based on: - * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.) 
- * Capex = cost per system * scale no.
- * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q.
- * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years.
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending.
-
-#### FBSimSearchNet
-
-##### FBSimSearchNet AP Rankings
-
-|Rank|Submission |Team |Hardware |Status |AP |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-------------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.99684](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_recall.png)** |
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.97863](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_recall.png)** |
-| 3|[-](-) |- |- |-|**-** |
-| 4|[-](-) |- |- |-|**-** |
-| 5|[-](-) |- |- |-|**-** |
-| 6|[-](-) |- |- |-|**-** |
-
-* The operational point for ranking is 2000 QPS. We will use the highest average precision for the search parameters that meet or exceed 2000 QPS.
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-
-##### FBSimSearchNet Throughput Rankings
-
-|Rank|Submission |Team |Hardware |Status |Q/S |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|------------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[9,345](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_throughput.png)** |
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[5,699](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_throughput.png)** |
-| 3|[-](-) |- |- |-|**-** |
-| 4|[-](-) |- |- |-|**-** |
-| 5|[-](-) |- |- |-|**-** |
-| 6|[-](-) |- |- |-|**-** |
-
-* The operational point for ranking is 0.9 average precision. We will use the highest throughput for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-
-
-##### FBSimSearchNet Power Rankings
-
-|Rank|Submission |Team |Hardware |Status |W*S/Q |
-|----|-------------------------------------------------------|------------------------------|-----------------------|--------|-----------|
-| 1|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md) |GSI Technology(*org*) |LedaE APU |final|**[0.0469](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/gemini/ssnpp-1B_power.png)**|
-| 2|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md) |Facebook Research(*org*) |NVidia GPU |final|**[0.0904](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/eval_2021/faiss_t3/ssnpp-1B_power.png)**|
-| 3|[-](-) |- |- |-|**-**|
-| 4|[-](-) |- |- |-|**-**|
-| 5|[-](-) |- |- |-|**-**|
-| 6|[-](-) |- |- |-|**-**|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption for the search parameters that meet or exceed 0.9 average precision.
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-
-##### FBSimSearchNet Cost Rankings
-
-|Rank|Submission |Team |Hardware |Status |Cost |capex |opex |unit cost|units@100K qps|KwH*4yrs |
-|----|--------------------|------------------------------|---------------------|--------|--------------|--------|--------|---------|--------------|---------|
-| 1|[faiss_t3](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/faiss_t3/README.md)|Facebook Research(*org*) |NVidia GPU |final|**$428,074.79** |$396,394.20|$31,680.59|$22,021.90 |18 |316,805.859|
-| 2|[gemini](https://github.com/harsha-simhadri/big-ann-benchmarks/blob/gw/T3/t3/gemini/README.md)|GSI Technology(*org*) |LedaE APU |final|**$629,442.91** |$612,993.26|$16,449.65|$55,726.66 |11 |164,496.451|
-| 3|[-](-)|- |- |-|**-** | -| -| -| -| -|
-| 4|[-](-)|- |- |-|**-** | -| -| -| -| -|
-
-* The operational point for ranking is 0.9 average precision. We will use the lowest power consumption/query for the search parameters that meet or exceed 0.9 average precision.
-* The formula is based on:
- * Take the algorithm's throughput submitted to leaderboard, use it to scale no. of systems needed to scale to 100K qps (using ceiling to round up any decimal.)
- * Capex = cost per system * scale no.
- * Take w*s/q from algorithm's power metric submitted to leaderboard and convert to KwH/q.
- * Multiply by total queries at 100K qps for 4 years = 4x365x24x60x60x100000 total queries over 4 years.
- * Opex = (total queries over 4 years) * KwH/query * $0.10/KwH
-* Abbreviations used in chart:
- * *org* = submitted by challenge organizer, so subject to competition restrictions
- * *final* = final submission
- * *inprog* = algorithm development still in progress
-* \*\*Nvidia has not yet approved the MSRP cost used for ranking, so participation in this benchmark is still pending.
diff --git a/t3/RANKING.md b/t3/RANKING.md
deleted file mode 100644
index 1d8bc88c9..000000000
--- a/t3/RANKING.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Explanation of T3 Ranking Method
-
-This readme outlines the ranking logic used for the T3 track of this competition.
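-
-In code form, the scoring rule detailed in the sections below (a team's leaderboard score is the sum, over its qualifying datasets, of its improvement over the baseline) might look like the following sketch; the data layout and function name are ours, not the framework's.
-
-```
-def leaderboard_score(team_results, baseline_results, min_datasets=3):
-    """Sum of (team metric - baseline metric) over the datasets a team qualifies on."""
-    common = set(team_results) & set(baseline_results)
-    if len(common) < min_datasets:
-        return None  # a team must qualify on at least 3 datasets
-    return sum(team_results[d] - baseline_results[d] for d in common)
-```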
-
-## Benchmarks
-
-We measure participants on 4 benchmarks and we maintain a leaderboard for each:
-* recall/average precision - recall is based on recall@10 accounting for distance ties
-* throughput - based on queries/second
-* power consumption - based on measuring watt*seconds/query
-* cost - based on a projection of capex + opex
-
-## Ranking Score
-
-For each benchmark/leaderboard, we will compute an aggregate score based on the team's performance on each participating dataset.
-
-A team must participate in at least 3 datasets to qualify for the competition.
-
-A team fails to qualify for a dataset if either 1) their algorithm does not support that dataset, or 2) the performance of their algorithm does not meet the threshold for that dataset (see below) for a benchmark that implements thresholds.
-
-The score on each participating dataset is based on the difference between the team's performance on that dataset and the performance of the baseline (see below).
-
-The team's final ranking score for a benchmark is the sum of individual participating dataset scores.
-
-In this way, teams that participate in more datasets can get an advantage.
-
-## Thresholds
-
-We implement thresholds for the recall/average precision and throughput benchmarks/leaderboards.
-
-The thresholds are listed below for each dataset:
-
-|Dataset            |R@10/AP    |Throughput(qps) |
-|-------------------|-----------|----------------|
-|deep-1B            |0.9        |2000            |
-|bigann-1B          |0.9        |2000            |
-|msturing-1B        |0.9        |2000            |
-|msspacev-1B        |0.9        |2000            |
-|text2image-1B      |0.86       |1762.363        |
-|ssnpp-1B           |0.9        |2000            |
-
-## Thresholds Explained
-
-Early in the planning of this competition we had to decide on reasonable performance thresholds to implement.
-
-We observed that the performance of the T3 baseline algorithm (see below) was achieving at least 90% recall@10 at 2000 qps for several datasets, so we decided to bake this into the competition rules. As our internal testing evolved we discovered that one dataset (text2image-1B) could not achieve those thresholds, and so we decided to lower the threshold based on the baseline performance we observed at that time.
-
-# Baseline
-
-## Algorithm and Hardware
-
-The baseline algorithm is based on FAISS library version 1.7.1. The FAISS library provides several ANN approaches and the baseline leverages the FAISS index called "IVF1048576,SQ8." The choice of this index was based on experimentation and consultation with Matthijs Douze of Facebook, one of the competition organizers. You can view the algorithm [here].
-
-The hardware is 1 PCIe V100 NVidia GPU attached to an Advantech SKY motherboard with 768 GB RAM. More details about the hardware can be found [here](faiss_t3/README.md).
-
-## Threshold
-
-The baseline algorithm informed the track thresholds as explained in the Thresholds section above.
-
-## Scoring
-
-The performance of the baseline algorithm factors into the scoring used to rank participants, as explained in the Ranking Score section above.
-
-The benchmarks used in the score calculation are listed below for each dataset:
-
-|Dataset            |Recall@10/AP|Throughput(qps) |Power(wspq)|
-|-------------------|------------|----------------|-----------|
-|deep-1B            |0.943       |4417.036        |0.113      |
-|bigann-1B          |0.927       |3086.656        |0.167      |
-|msturing-1B        |0.909       |2359.485        |0.204      |
-|msspacev-1B        |0.909       |2770.848        |0.167      |
-|text2image-1B      |0.860       |1762.363        |0.123      |
-|ssnpp-1B           |0.979       |5572.272        |0.095      |
-
-## Measurements
-
-The baseline benchmarks are based on the most recent measurements. At the start, and during the competition, we had published different numbers. The reasons for the changes are described in [Appendix A](#appendix-a) below.
-
-# Appendix
-
-## Appendix A
-
-At the start of the competition, we had released the following thresholds:
-
-|Dataset            |R@10/AP    |Throughput(qps) |
-|-------------------|-----------|----------------|
-|deep-1B            |0.9        |2000            |
-|bigann-1B          |0.9        |2000            |
-|msturing-1B        |0.9        |2000            |
-|msspacev-1B        |0.85       |1484.217        |
-|text2image-1B      |0.86       |1510.624        |
-|ssnpp-1B           |0.9        |2000            |
-
-The most recent thresholds have changed from the original for the following reasons:
-* Changes in how recall was computed to deal with ties in distance.
-* Changes in the radius for the range search dataset.
-* We also decided to re-run the measurements of the baseline algorithm on the T3 baseline machine using the current evaluation framework; the original baselines were measured using a different software framework.
-* The original baselines published did not calculate and report baseline cost.
-
-The original baselines that were published are shown below. For recall:
-
-| dataset      | qps      | recall@10 |
-| ------------ | -------- | --------- |
-| msturing-1B  | 2011.542 | 0.910     |
-| bigann-1B    | 2058.950 | 0.927     |
-| text2image-1B| 2120.635 | 0.860     |
-| deep-1B      | 2002.490 | 0.942     |
-| msspacev-1B  | 2190.829 | 0.850     |
-
-And for throughput:
-
-| dataset      | qps      | recall@10 |
-| ------------ | -------- | --------- |
-| msturing-1B  | 2421.856 | 0.902     |
-| bigann-1B    | 2186.755 | 0.905     |
-| text2image-1B| 1510.624 | 0.882     |
-| deep-1B      | 3422.473 | 0.916     |
-| msspacev-1B  | 1484.217 | 0.869     |
-
-The following tables show the baseline performance on the range search dataset:
-
-Instead of recall, the range search dataset utilizes average precision:
-
-| dataset    | qps      | ap    |
-| -----------| ---------| ------|
-| ssnpp-1B   | 2907.414 | 0.979 |
-
-For throughput:
-
-| dataset    | qps      | ap        |
-| -----------| -------- | --------- |
-| ssnpp-1B   | 5572.272 | 0.910     |
-
-Here were the published baselines for power:
-
-| dataset      | power(wspq) |
-| ------------ | ------------|
-| msturing-1B  | 0.203740    |
-| bigann-1B    | 0.167123    |
-| text2image-1B| 0.089675    |
-| deep-1B      | 0.112581    |
-| msspacev-1B  | 0.099569    |
-| ssnpp-1B     | 0.0944865   |
-
-
-The following table lists the full measurements for baseline performance experiments that informed the original baselines (only recall and throughput are shown.)
- -| dbase| throughtput(qps)| recall@10| -|-------------------|--------------------|---------------------| -| bigann-1B| 2186.754570| 0.904860| -| bigann-1B| 1926.901416| 0.911140| -| bigann-1B| 1657.226695| 0.919860| -| bigann-1B| 2058.950046| 0.926560| -| bigann-1B| 1931.042641| 0.932450| -| bigann-1B| 1770.748406| 0.937190| -| bigann-1B| 1609.052224| 0.941330| -| bigann-1B| 1504.748288| 0.943890| -| text2image-1B| 2607.779941| 0.834820| -| text2image-1B| 2456.621393| 0.841845| -| text2image-1B| 2285.966847| 0.851920| -| text2image-1B| 2120.635218| 0.860156| -| text2image-1B| 1917.445903| 0.867244| -| text2image-1B| 1748.662912| 0.873469| -| text2image-1B| 1612.313130| 0.878757| -| text2image-1B| 1510.624227| 0.882487| -| msspacev-1B| 2465.473370| 0.844805| -| msspacev-1B| 2190.828587| 0.850205| -| msspacev-1B| 1935.385102| 0.854864| -| msspacev-1B| 1931.506970| 0.858998| -| msspacev-1B| 1748.525911| 0.862437| -| msspacev-1B| 1585.766679| 0.865152| -| msspacev-1B| 1477.389358| 0.867912| -| msspacev-1B| 1484.216732| 0.868812| -| msturing-1B| 3625.040250| 0.881202| -| msturing-1B| 3197.403722| 0.888140| -| msturing-1B| 2907.993722| 0.893669| -| msturing-1B| 2655.951474| 0.898400| -| msturing-1B| 2421.855941| 0.902413| -| msturing-1B| 2233.241641| 0.905846| -| msturing-1B| 2070.942269| 0.908949| -| msturing-1B| 2011.542149| 0.910115| -| deep-1B| 3422.472565| 0.915540| -| deep-1B| 2732.133452| 0.920430| -| deep-1B| 2507.486404| 0.927790| -| deep-1B| 1992.323615| 0.932950| -| deep-1B| 2037.783443| 0.937940| -| deep-1B| 2002.489712| 0.941740| -| deep-1B| 1967.826369| 0.945130| -| deep-1B| 1874.898854| 0.947430| - -These baseline numbers were performed on the machine configuration used for the T3 faiss baseline. - -An older (now obsolete) code framework was used to determine these thresholds, not the existing evaluation framework so unfortunately there is no algos.yaml configuration file. - - - diff --git a/t3/README.md b/t3/README.md deleted file mode 100644 index 0f9969c50..000000000 --- a/t3/README.md +++ /dev/null @@ -1,355 +0,0 @@ -# T3 Track - -## Table Of Contents - -- [Introduction](#introduction) -- [For Participants](#for_participants) - - [Getting Started](#getting_started) - - [Starting Your Development](#starting_your_development) - - [Developing Your Dockerfile](#developing_your_dockerfile) - - [Developing Your Algorithm](#developing_your_algorithm) - - [Submitting Your Algorithm](#submitting_your_algorithm) - - [How To Get Help](#how_to_get_help) - - [Leaderboard Ranking](#leaderboard_ranking) - - [Baseline Performance](#baseline_performance) - - [Recall/Average Precision Leaderboard](#recall/average_precision_leaderboard) - - [Throughput Leaderboard](#throughput_leaderboard) - - [Power Leaderboard](#power_leaderboard) - - [Cost Leaderboard](#cost_leaderboard) -- [For Evaluators](#for_organizers) - - [Evaluating Participant Algorithms](#evaluating_participant_algorithms) - - [Participant Sends Hardware To Evaluators](#participant_sends_hardware_to_organizers) - - [Participant Gives Remote Access To Evaluators](#participant_gives_remote_access_to_organizer) - - [Participant Runs And Submits Benchmarks](#participant_runs_and_submits_benchmark) - - [Evaluating Power Consumption](#evaluating_power_consumption) - -## Introduction - -The T1 and T2 tracks of the competition restrict the evaluation of algorithms to standard Azure CPU servers with 64GB of RAM and 2TB of SSD. 
The only restriction in the T3 track is that the evaluation machine can be any hardware that is commercially available ( including any commercially available add-on PCIe boards ). T3 will maintain four leaderboards: -* One based on recall/average precision -* One based on throughput -* One based on power consumption -* One based on hardware cost - -You can see the latest leaderboards' status [here](LEADERBOARDS.md). - -Participants must submit their algorithm via a pull request and index file(s) upload (one per participating dataset). Participants are not required to submit proprietary source code such as software drivers or firmware. - -Competition evaluators will evaluate the participant's algorithm and hardware via one of these options: -* Participants send their hardware to the organizers at the participant's expense. -* Participants give organizers remote access to the hardware. -* Participants run the evaluation benchmarks on their own, and send the results to the organizers. - -## For_Participants - -### Requirements - -You will need the following installed on your machine: -* Python ( we tested with Anaconda using an environment created for Python version 3.8.5 ) -* Note that we tested everything on Ubuntu Linux 18.04 but other environments should be possible. -* It's assumed that all the software drivers and services need to support your hardware are installed on development machines. For example, to run the T3 baseline, your system must have a Cuda 11 compatibile GPU, Cuda 11.0, and the cuda 11.0 docker run-time installed. See the T3 baseline [installation instructions](faiss_t3/README.md). Cuda versions greater than 11.0 should be possible, but weren't tested. - -### Getting_Started - -This section will present a small tutorial about how to use this framework and several of the key scripts you will use throughout the development of your algorithm and eventual submission. - -First, clone this repository and cd into the project directory: -``` -git clone -``` -Install the python package requirements: -``` -pip install -r requirements.txt -``` -Create a small, sample dataset: -``` -python create_dataset.py --dataset random-xs -``` -Build the docker container for the T3 baseline: -``` -python install.py --dockerfile t3/faiss_t3/Dockerfile -``` -Run a benchmark evaluation using the algorithm's definition file: -``` -python run.py --t3 --definitions t3/faiss_t3/algos.yaml --dataset random-xs -``` -Please note that the *--t3* flag is important. - -Now analyze the results: -``` -python plot.py --definitions t3/faiss_t3/algos.yaml --dataset random-xs -``` -This will place a plot of the algorithms performance, recall/average_precision-vs-throughput, into the *results/* directory. - -### Starting_Your_Development - -First, please create a short name for your team without spaces or special characters. Henceforth in these instructions, this will be referenced as [your_team_name]. - -Create a custom branch off main in this repository: -``` -git checkout -b t3/[your_team_name] -``` -In the *t3/* directory, create a sub-directory using that name. -``` -mkdir t3/[your_team_name] -``` - -### Developing_Your_Dockerfile - -This framework evaluates algorithms in Docker containers by default. Your algorithm's Dockerfile should live in your team's subdirectory at *t3/[your_team_name]*. Ideally, your Docker file should contain everything needed to install and run your algorithm on a system with the same hardware. 
Given the nature of T3, this will not likely be entirely possible since custom hardware host drivers and certain low level host libraries require an installation step outside of what can be accomplished with Docker alone. Please make your best effort to include as much installation and setup within your Docker container, as we want to promote as much transparency as possible among all participants. - -Please consult the Dockerfile [here](faiss_t3/Dockerfile) for an example. - -To build your Docker container, run: -``` -python install.py --dockerfile t3/[your_team_name]/Dockerfile -``` - -### Developing_Your_Algorithm - -Develop and add your algorithm's python class to the [benchmark/algorithms](../benchmark/algorithms) directory. -* You will need to subclass from the [BaseANN class](../benchmark/algorithms/base.py) and implement the functions of that parent class. -* You should consult the examples already in the directory. - -As you develop and test your algorithm, you will likely need to test on smaller datasets. This framework provides a way to create datasets of various sizes. For example, to create a dataset with 10000 20-dimensional random floating point vectors, run: -``` -python create_dataset.py --dataset random-xs -``` -To see a complete list of datasets, run the following: -``` -python create_dataset.py --help -``` -When you are ready to test on the competition datasets, use the create_dataset.py script as follows: -``` -python create_dataset.py --dataset [sift-1B|bigann-1B|text2image-1B|msturing-1B|msspacev-1B|ssnpp-1B] -``` -To benchmark your algorithm, first create an algorithm configuration yaml in your teams directory called *algos.yaml.* This file contains the index build parameters and query parameters that will get passed to your algorithm at run-time. Please look at [this example](faiss_t3/algos.yaml). - -Now you can benchmark your algorithm using the run.py script: -``` -python run.py --t3 --definitions t3/[your_team_name]/algos.yaml --dataset random-xs -``` -This will write the results to the toplevel [results](../results) directory. - -Now you can analyze the results by running: -``` -python plot.py --definitions t3/[your_team_name]/algos.yaml --dataset random-xs -``` -This will place a plot of the algorithms performance, recall/average_precision-vs-throughput, into the toplevel [results](../results) directory. - -The plot.py script supports other benchmarks. To see a complete list, run: -``` -python plot.py --help -``` - -### Submitting_Your_Algorithm - -A submission is composed of the following: -* 1 index binary file(s) for each dataset for which you are participating (see *Index File* section below.) -* 1 *algos.yaml* with only one set of build parameters and at most 10 sets of query parameters for each dataset in which you are participating. Please put that file into the *t3/[your_team_name]/* directory. -* Your algorithm's python class ( placed in the [benchmark/algorithms/](../benchmark/algorithms) directory.) -* 1 README file with specific information about your hardware and software (see *README File* section below.) -* Evidence of the cost of your hardware components (see *README File* section below.) -* Optional information (see *Optional Information* section below.) - -### Index File - -The binary index file(s) must be http or azcopy accessible and is referenced within your *t3/[your_team_name]/algos.yaml* config file. Please see the baseline [algos.yaml](faiss_t3/algos.yaml) example. 
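-
-For orientation, tying together the algorithm class, the algos.yaml definition, and the index file described above, a skeletal wrapper might look like the sketch below. It is illustrative only: the authoritative interface is whatever the BaseANN class in benchmark/algorithms/base.py actually defines, and the method names shown here are assumptions, not a specification.
-
-```
-# Sketch of a T3 algorithm wrapper. Method names are illustrative; implement
-# whatever benchmark/algorithms/base.py actually requires.
-from benchmark.algorithms.base import BaseANN
-
-class MyTeamANN(BaseANN):
-    def __init__(self, metric, index_params):
-        self._metric = metric
-        self._index_params = index_params
-
-    def fit(self, dataset):
-        """Build the index for the named dataset."""
-        raise NotImplementedError
-
-    def load_index(self, dataset):
-        """Load a prebuilt index (e.g. the uploaded index file); return False to trigger fit()."""
-        return False
-
-    def set_query_arguments(self, query_args):
-        """Apply one of the (at most 10) search parameter sets from algos.yaml."""
-        self._query_args = query_args
-
-    def query(self, X, k):
-        """Answer a batch of k-NN queries and keep the results for the framework to collect."""
-        raise NotImplementedError
-```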
- -### The README File - -Your submission's top-level directory should contain a README.md with the following sections: -* **Hardware Configuration And Cost** This section must contain a table that breaks down the hardware components of your system and the cost. Each entry should link to evidence of the component cost. -* **Hardware Access** This section describes how evaluators acquire access to the hardware (specific instructions or contact information.) -* **No Source Declarations** This section must contain a list of software components that were not provided with the submission. -* **Hardware Setup And Software Installation** This section should contain any hardware and software installation instructions. -* **Run Competition Algorithm** This section should contain instructions to run the competition algorithm, for example, run scripts. - -Please consult the baseline [README](faiss_t3/README.md) example. - -### Optional Information - -Please feel free to append sections to the base README requirements. For example, you can include other benchmarks of interest. - -### How_To_Get_Help - -There are several ways to get help as you develop your algorithm using this framework: -* You can submit an issue at this github repository. -* Send an email to the competition's T3 organizer, gwilliams@gsitechnology.com -* Send en email to the competition's googlegroup, big-ann-organizers@googlegroups.com - -### Leaderboard_Ranking - -T3 will maintain four different leaderboards 1) one based on recall/average precision 2) one based on throughput 3) one based on power consumption and 4) one based on cost. The details of the ranking metrics are described here. - -#### Baseline_Performance - -A performance baseline has been established for this competition, based on both queries per second (qps) and recall/average precision (recall measured as recall@10.) For the recall leaderboard, we will rank participants by recall@10/average precision at 2K qps. - -The table below shows the baseline recall@10 for all the (knn search type) datasets at or above 2K qps ( or as close to it as possible. ) - -| dataset | recall@10 | -| ------------ | --------- | -| msturing-1B | 0.909 | -| bigann-1B | 0.927 | -| text2image-1B| 0.860 | -| deep-1B | 0.943 | -| msspacev-1B | 0.909 | - -For the throughput leaderboard, we will rank participants by qps at or above 90% recall@10 (or close to it as possible.) The table below shows the baseline throughput for all the (knn search type) datasets near 90% recall@10. 
-
-| dataset       | qps      |
-| ------------- | -------- |
-| msturing-1B   | 2359.485 |
-| bigann-1B     | 3086.656 |
-| text2image-1B | 1762.363 |
-| deep-1B       | 4417.036 |
-| msspacev-1B   | 2770.848 |
-
-The following tables show the baseline performance on the range search dataset.
-
-Instead of recall, the range search dataset uses average precision:
-
-| dataset    | ap     |
-| ---------- | ------ |
-| ssnpp-1B   | 0.979  |
-
-For throughput:
-
-| dataset    | qps      |
-| ---------- | -------- |
-| ssnpp-1B   | 5572.272 |
-
-The following table shows the baseline performance on all datasets for power, shown as watt-seconds/query:
-
-| dataset       | wspq     |
-| ------------- | -------- |
-| msturing-1B   | 0.204    |
-| bigann-1B     | 0.167    |
-| text2image-1B | 0.123    |
-| deep-1B       | 0.113    |
-| msspacev-1B   | 0.167    |
-| ssnpp-1B      | 0.095    |
-
-The following table shows the baseline performance on all datasets for cost:
-
-| dataset       | cost            |
-| ------------- | --------------- |
-| msturing-1B   | $1,018,332.30   |
-| bigann-1B     | $785,282.45     |
-| text2image-1B | $1,298,436.77   |
-| deep-1B       | $545,952.10     |
-| msspacev-1B   | $873,460.84     |
-| ssnpp-1B      | $429,634.84     |
-
-Details of how power is measured and how cost is computed are explained further down.
-
-Baselines were measured on a 56-core Intel Xeon system with 700GB of RAM and an NVidia V100 GPU, using the FAISS library with the index strategy IVF1048576,SQ8.
-
-Please consult [this document](RANKING.md) for a detailed discussion about how the baseline is used to inform competition thresholds and participant scoring, as well as recent developments on baseline performance.
-
-#### Recall/Average_Precision_Leaderboard
-
-This leaderboard leverages the recall@10-vs-throughput benchmark that has become standard when evaluating and comparing approximate nearest neighbor algorithms. We will rank participants based on recall@10/average_precision at 2K qps for each dataset. The evaluation framework allows for 10 different search parameter sets, and we will use the best value of recall@10 from the set.
-
-The final ranking will be based on a computed score, which is the sum of the improvements in recall over the baseline for the participating databases. A submission must participate in at least 3 databases.
-
-Participants that cannot meet or exceed the 2000 qps minimum threshold ( or 1762.363 qps for Text2Image ) for a dataset will be dropped from ranking consideration for that dataset.
-
-Please consult [this document](RANKING.md) for a detailed discussion about how the baseline is used to inform competition thresholds and participant scoring.
-
-#### Throughput_Leaderboard
-
-This leaderboard also leverages the recall@10-vs-throughput benchmark. We will rank participants based on throughput (qps) at the recall@10 threshold of 90%. The evaluation framework allows for 10 different search parameter sets, and we will use the best value of throughput from the set.
-
-The final ranking will be based on a computed score, which is the sum of the improvements in throughput over the baseline for the participating databases. A submission must participate in at least 3 databases.
-
-Please consult [this document](RANKING.md) for a detailed discussion about how the baseline is used to inform competition thresholds and participant scoring.
-
-#### Power_Leaderboard
-
-This leaderboard is related to power consumption, which is an important consideration when scaling applications and servers in a datacenter. The primary ranking metric is ( kilowatt-hour / query ).
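As an illustration of how the measured quantity maps to this metric (the numbers here are made up for the example, not measured data): a system that draws an average of 500 watts during a 10-second run answering 30,000 queries consumes 500 x 10 = 5,000 watt-seconds, or about 0.167 watt-seconds/query (the unit used in the baseline power table above), which is roughly 4.6e-8 kilowatt-hours/query.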
Participants must meet or exceed the recall@10 and 2000 qps minimum thresholds ( for Text2Image, the baseline performance is the minimum threshold ). The reason for these minimum thresholds is to discourage algorithm designers from purposefully sacrificing too much performance in order to lower power consumption.
-
-The evaluation framework leverages the power sensors available in the standard IPMI power management interface of most commercial server chassis. We also leverage the open source project ipmicap ( https://github.com/fractalsproject/ipmicap ) to capture the power sensors and calculate the power consumption.
-
-During evaluation, for each search parameter set, power consumption is acquired over at least 10 seconds of running search on the entire query set. During those 10 seconds, multiple consecutive runs over the query set may occur in order to maintain the minimum duration of 10 seconds, and the duration may exceed 10 seconds if a single pass over the query set takes longer than that. So a run may consist of one batch query or several, and its duration will be at least 10 seconds. The power consumption acquired for the run is divided by the total number of queries performed during the run, resulting in ( kilowatt-hour / query ). Up to 10 search parameter sets are allowed, and we use the minimum value for each dataset when ranking participants.
-
-The final ranking will be based on a computed score, which is the sum of the improvements in power consumption over the baseline for the participating databases. A submission must participate in at least 3 databases.
-
-There are no direct thresholds for this benchmark/leaderboard. Indirectly, participants that cannot meet or exceed the recall@10 threshold ( or 0.86 for Text2Image ) for a dataset will be dropped from ranking consideration for that dataset. Scoring will be based on the baseline performance for power consumption shown for each dataset in the baseline tables above.
-
-Please consult [this document](RANKING.md) for a detailed discussion about how the baseline is used to inform participant scoring for this benchmark.
-
-#### Cost_Leaderboard
-
-This leaderboard is related to cost, which is an important consideration when scaling applications and servers in a datacenter. The primary ranking metric will be an estimate of the capital expense (capex) plus operational expense (opex) required to scale the participant's system to 100,000 qps while meeting or exceeding the baseline recall@10.
-
-The formula for the capex estimate is as follows:
-
-capex = ( MSRP of all the hardware components of the system ) X ( minimum number of systems needed to scale to support 100,000 qps )
-
-The hardware components include the chassis and all of the electronics within the chassis, including the power supplies, motherboard, HDD/SSD, and all extension boards. Participants must provide evidence of the MSRP of components ( either published on a web-site or a copy of an invoice/receipt with customer-identifiable information removed ). Volume-based pricing is not considered.
-
-The formula for the opex estimate is as follows:
-
-opex = ( max qps at or greater than the baseline recall@10 threshold ) X ( kilowatt-hour / query ) X ( seconds / hour ) X ( hours / year ) X ( 5 years ) X ( dollars / kilowatt-hour ) X ( minimum number of systems needed to scale to support 100,000 qps )
-
-Notes on this formula:
-* We will use the maximum qps actually measured that meets or exceeds the baseline recall@10 threshold across all query parameter sets.
-
-* We do not account for the cost related to the physical footprint of the system(s), such as the cost of the space occupied by the system(s) in the datacenter.
-* We assume linear horizontal scalability of systems at zero cost. In other words, we do not account for the costs associated with actually clustering the multiple systems needed to reach 100,000 qps ( networking equipment, costs due to routing traffic among systems, costs due to merging results, etc. )
-* We will use $0.10 / kilowatt-hour for the power consumption cost.
-* 5 years is the standard hardware depreciation schedule used for tax purposes with the Internal Revenue Service.
-* We'd like to thank David Rensin, former Senior Director at Google Cloud and now SVP at Pendo.io, for his valuable contribution and consultation with respect to the capex and opex formulas.
-
-The final ranking will be based on a computed score, which is the sum of the improvements in cost over the baseline for the participating databases. A submission must participate in at least 3 databases.
-
-There are no direct thresholds for this benchmark/leaderboard. Indirectly, participants that cannot meet or exceed the recall@10 minimum threshold ( or 0.86 for Text2Image ) for a dataset will be dropped from ranking consideration for that dataset.
-
-## For_Evaluators
-
-### Evaluating_Participant_Algorithms
-
-How a participant's algorithm is benchmarked will depend on how they registered for the T3 competition, using one of these options:
-* Participant sent hardware to the competition evaluators at the participant's expense.
-* Participant is giving the competition evaluators remote SSH access to their machine.
-* Participant will run the evaluation framework on their own and send the benchmark results to the competition evaluators.
-
-Evaluation steps for each option are detailed in the next sections.
-
-### Participant_Sends_Hardware_To_Evaluators
-
-Evaluators will work with participants who send hardware during competition on-boarding. Hardware will be sent and returned at the participant's expense.
-
-Evaluators and participants will work closely to make sure the hardware is properly installed and configured.
-
-Evaluators may allow remote access to the machines in order to complete the setup, as needed.
-
-### Participant_Gives_Remote_Access_To_Evaluators
-
-Participants give competition evaluators access to remote machines via SSH.
-
-### Participant_Runs_And_Submits_Benchmarks
-
-This is a very special case, and not all participants will have this option. In this case, the participant will run the evaluation on their own, export the data to a CSV via the export.py script, and send it to the competition evaluators. Participants are still required to submit a pull request and upload their best index.
-
-## Evaluating_Power_Consumption
-
-The hardware chassis which houses all the hardware must support the IPMI management interface.
-
-Determine the IP address, port, and authentication credentials of that interface.
-
-Follow the instructions at the IPMICAP open-source project ( http://www.github.com/fractalsproject/ipmicap ) to access the IPMI interface and configure it to listen on an available port number.
-
-Note the IP address of the machine running IPMICAP ( it does not have to be the same machine as the target hardware ).
-
-Now run the following for each competition dataset:
-```
-python run.py --dataset [DATASET] --t3 --definitions [DEFINITION FILE] --sensors [S1 S2 ...]
--sessions --powercapture [IPMICAP_MACHINE_IP]:[IPMICAP_LISTEN_PORT]:[TIME_IN_SECONDS] -``` -where S1, S2,... = the sensor records for all POWER_IN sensors ( please see the IPMCAP documentation to determine these sensor records. - -This will monitor power consumption over that period of time ( 10 seconds is a good number ). - -You can retrieve a plot of the power consumptions ( measured as watt-seconds/query ) using the plot.py script. - diff --git a/t3/TASKS_ISSUES_RESOLUTIONS.md b/t3/TASKS_ISSUES_RESOLUTIONS.md deleted file mode 100644 index 37c782b57..000000000 --- a/t3/TASKS_ISSUES_RESOLUTIONS.md +++ /dev/null @@ -1,46 +0,0 @@ - -# BigANN Challenge T3 Tasks, Issues, and Resolutions - -In the spirit of maintaining a fair and open competition, we will be tracking all important remaining tasks and issues, and their respective resolution - and making that all public here on this README. All competition rankings and winners will be "unofficial" until all tasks and issues have been resolved. - -Participants should send their questions and issues to the T3 organizer directly (gwilliams@gsitechnology.com), or to the competition google group at big-ann-organizers@googlegroups.com. Note that some issues may require a complete re-evaluation of an algorithm on its respective hardware, or may require additional information from a participant or competition organizer(s). - -## Tasks (open) - -* [T3 Organizer self-report] In the private set evaluation, there are issues with the msspace-v ground truth file preventing any submission evaluation and scoring on that dataset. -* [T3 Organizer self-report] In the private set evaluation, there are issues with some submissions crashing on either/both deep-1B and msspacev-1b thus preventing any scoring on that dataset. -* [Microsoft to T3 Organizer] Currently, DiskANN cannot qualify for power and cost benchmarks due to issue with running IPMICAP ( python ipmi in particular seems to be the issue. ) - * PENDING RESOLUTION: [T3 Organizer to Microsoft] We will work on local dcmi support in the IPMICAP server. -* [T3 Organizer to Microsoft] Need to retrieve "results" h5py files from MS DiskANN remote machine. - -## Issues (open) - -* [T3 Organizer self-report] The "opex" power cost for an Nvidia submission seems impossibly low ($80). - * PENDING RESOLUTION: We need to measure quiescent power of a system and establish the min power consumption and troubleshoot the DCMI power reporting on the NVidia system. - -## Resolutions - -* [GSI asked] What does NQ mean? - * [T3 Organizer responded] It could mean 1) team did not submit a qualifying algorithm for the benchmark 2) team decided did not participate in that benchmark 3) unable to get some key data for the benchmark (such as power or system cost, or both ). -* [T3 Organizer self-report] Need to retrieve "results" h5py files from NVidia's remote machine. - * Done on 11/23/2021 -* [T3 Organizer to NVidia] Need to retrieve power monitoring "results" h5py files from NVidia's remote machine. - * Done on 11/23/2021 and subsequently on changes to algos.yaml -* [GSI to T3 Organizer] Need better documentation for how to extract power benchmark from plot.py script. - * Answered via email. Basically, you need to supply "wspq" as an explicit metric you want to retrieve using the chosen axis. Run "python ploy.py --help" to get more information. -* [GSI to T3 Organizers] We cannot reproduce the baseline performance on SSNPP on same/similar hardware. - * Organizer repeated the eval and it was the same. 
It could have been an issue with faiss (gpu) library and version. -* [GSI to T3 Organizers] Have you discussed taking power also on the recall working point and not just on the throughput working point? -[GSI asks T3 Organizers] Since some algorithms implement smart caching mechanisms to simulate real life scenarios and since the competition framework sends the same queries again and again 50 time for each dataset (5 runs x 10 query configurations) which is not a real life case. It is important that competition framework needs to verify the results, automatically (and if not possible manually) that no caching mechanism is used in between runs and in between query configurations. One way is to make sure that the throughput for the runs doesn’t differ much taking into account that there are 5 runs and 10 configurations with the same queries. Probably a better way is to send for different queries or somehow cool down the cache in between runs by sending random queries. - * The eval framework now implements "possible query response cache" detection and the competition reports this as an anomaly and allows teams to explain why these happen. It's too late in the competition to establish a policy to deal with these "anomalies" such as 1) ask team to mitigate the effect 2) cool the cache with random queries 3) throw out the data. -* [T3 Organizer to Microsoft] Currently reported DiskANN CSV results is using an old version of recall computation (ie, not accounting for ties and it will likely affect msspacev-1B recall mostly). - * This was resolved. Microsoft exported a new csv with the proper recall. -* [GSI to T3 Organizer] New index for SSNPP and Text2Image requires re-evaluation for those datasets and updated scores. - * This was done successfully on the public query set. -* [Intel asks T3 Organizer] Why won't there be one winner for T3 that combines all individual benchmarks? - * We have provided the reason to Intel. We weren't sure how to combine them in this first competition - likely it will be combined in the future. -* [Intel asks T3 Organizer] Why are power and cost rankings optional for a submission? - * We have provided the reason to Intel. We werent sure how easy it would be to support this for all participants in this first competition. -* [T3 Organizer asks NVidia] Can't we use an MSRP from another company as proxy for system cost? - * We will take the cheapest MSRP from third party seller for the leaderboard (we found 150K). We've footnoted this in the rankings. 
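Following up on the plot.py power-benchmark resolution above, a command along these lines can be used to extract the power metric. This is only a sketch: the --dataset, --definitions, and --output flags appear elsewhere in this repository's documentation, while the -y axis flag is an assumption to confirm with `python plot.py --help`; "wspq" is the metric name given in the resolution above.
```
python plot.py --dataset deep-1B --definitions t3/faiss_t3/algos.yaml -y wspq --output power.png
```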
- diff --git a/t3/eval_2021/faiss_t3/prun.sh b/t3/eval_2021/faiss_t3/prun.sh deleted file mode 100755 index a34886dca..000000000 --- a/t3/eval_2021/faiss_t3/prun.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -#python run.py --t3 --private-query --definitions t3/faiss_t3/algos.yaml --dataset deep-1B --nodocker -python run.py --t3 --private-query --definitions t3/faiss_t3/algos.yaml --dataset bigann-1B --nodocker -#python run.py --t3 --private-query --definitions t3/faiss_t3/algos.yaml --dataset text2image-1B --nodocker -#python run.py --t3 --private-query --definitions t3/faiss_t3/algos.yaml --dataset msturing-1B --nodocker -#python run.py --t3 --private-query --definitions t3/faiss_t3/algos.yaml --dataset msspacev-1B --nodocker -#python run.py --t3 --private-query --definitions t3/faiss_t3/algos.yaml --dataset ssnpp-1B --nodocker - -#python run.py --definitions t3/faiss_t3/algos.yaml --dataset deep-1B --t3 --private-query --nodocker --power-capture 192.168.99.110:1237:10 -#python run.py --definitions t3/faiss_t3/algos.yaml --dataset bigann-1B --t3 --private-query --nodocker --power-capture 192.168.99.110:1237:10 -#python run.py --definitions t3/faiss_t3/algos.yaml --dataset text2image-1B --t3 --private-query --nodocker --power-capture 192.168.99.110:1237:10 -#python run.py --definitions t3/faiss_t3/algos.yaml --dataset msturing-1B --t3 --private-query --nodocker --power-capture 192.168.99.110:1237:10 -#python run.py --definitions t3/faiss_t3/algos.yaml --dataset msspacev-1B --t3 --private-query --nodocker --power-capture 192.168.99.110:1237:10 -#python run.py --definitions t3/faiss_t3/algos.yaml --dataset ssnpp-1B --t3 --private-query --nodocker --power-capture 192.168.99.110:1237:10 diff --git a/t3/faiss_t3/Dockerfile b/t3/faiss_t3/Dockerfile deleted file mode 100644 index 5720572f6..000000000 --- a/t3/faiss_t3/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ - -FROM nvidia/cuda:11.0-devel-ubuntu18.04 - -ENV PATH="/root/miniconda3/bin:${PATH}" -ARG PATH="/root/miniconda3/bin:${PATH}" - -# CONDA - -RUN apt-get update && apt-get install -y wget build-essential git - -RUN wget \ - https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \ - && mkdir /root/.conda \ - && bash Miniconda3-latest-Linux-x86_64.sh -b \ - && rm -f Miniconda3-latest-Linux-x86_64.sh \ - && conda --version \ - && conda install -c pytorch python=3.6.9 faiss-gpu cudatoolkit=11.0 - -RUN conda --version && which conda && which python && which pip3 - -# BIGANN - -RUN pip3 install -U pip - -WORKDIR /home/app -COPY t3/faiss_t3/faiss-gpu_requirements.txt run_algorithm.py ./ -RUN pip3 install -r faiss-gpu_requirements.txt - -ENTRYPOINT ["python3", "run_algorithm.py"] - -## For the following RUN command to work, we need to initiate docker build -## with a gpu device request much like what's done with docker eval run. -# RUN python3 -c 'import faiss; print("gpus=", faiss.get_num_gpus())' - -RUN python3 -c 'import faiss; print(faiss.IndexFlatL2)' diff --git a/t3/faiss_t3/README.md b/t3/faiss_t3/README.md deleted file mode 100644 index d7dadadeb..000000000 --- a/t3/faiss_t3/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# FAISS Baseline - -This README contains information required for T3 Track submissions. - -## Hardware Configuration And Cost - -|Part |Model |No. 
|Unit Price |Total Price| -|-----------------------------|--------------------------------------------------|----|------------------------------------|-----------| -|Chassis and Motherboard |[Advantech Sky-6200 2U](cost/AdvantechSky6200.pdf)| 1|[5572.42](cost/AdvantechSky6200.pdf)| 5572.42| -|RAM |[Advantech 32GB Memory](cost/RAM.pdf) | 24| [259.00](cost/RAM.pdf)| 6216.00| -|SSD |[2TB SeaGate](cost/SSD.pdf) | 1| [334.48](cost/SSD.pdf)| 334.48| -|GPU |[NVidia V100](cost/GPU.pdf) | 1| [9899.00](cost/GPU.pdf)| 9899.00| -|Total | | 1| | 22021.90| - -## Hardware Access - -This hardware is maintained by the competition organizers. Please send an email to big-ann-organizers@googlegroups.com to get access to a system or see the section below to build your own system. - -## No Source Code Declarations - -This submission requires the following software components where source-code is not available and/or not part of the source-code for this submission: -* NVidia docker container runtime ( https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html ) -* CUDA 11 libraries and host drivers -* NVidia V100 firmware - -## Hardware Setup And Software Installation - -## Prerequisites - -* Linux Ubuntu 18.04 -* CUDA 11.0 -* The NVidia docker container runtime ( https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html ) -* This cloned project repository - -### Test On A Small Dataset - -Note that all the subsequent commands must be run in the top-level directory of this repo on your machine. - -First build the faiss_t3 docker container: -``` -python install.py --dockerfile t3/faiss_t3/Dockerfile -``` -Now create a small random dataset, query set, and associated ground truth: -``` -python create_dataset.py --dataset random-xs -``` -Now build a FAISS index for this dataset: -``` -python run.py --definitions t3/faiss_t3/algos.yaml --dataset random-xs --t3 -``` -This may take an hour or so. When it's done, you can plot the recall-vs-throughput results as follows: -``` -python plot.py --definitions t3/faiss_t3/algos.yaml --dataset random-xs -``` -You can now run on the competition datasets. - -#### Known Issues - -The NVidia GPU docker support for various Linux distributions involves a lot of steps. - -The run.py script also supports a "--nodocker" flag. When run in this way, the algorithm is not launched in a docker container. Obviously, this requires having CUDA 11.0 drivers working natively on your system, and the installation of the compatible FAISS GPU library. - -If you take this route, we recommend using the Anaconda distribution of python, creating a python=3.8.5 environment, and installing FAISS using this command: -``` -conda install -c pytorch faiss-gpu cudatoolkit=11.0 -``` diff --git a/t3/faiss_t3/algos.yaml b/t3/faiss_t3/algos.yaml deleted file mode 100644 index 4919134e6..000000000 --- a/t3/faiss_t3/algos.yaml +++ /dev/null @@ -1,231 +0,0 @@ -random-xs: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - # the following args show all build arguments that can be provided to the t3 baseline. 
- args: | - [{"indexkey": "IVF5,SQ8", - "buildthreads": -1, - "by_residual": -1, - "add_bs": 100000, - "maxtrain": 0, - "clustering_niter": -1, - "add_splits": 1, - "no_precomputed_tables": true, - "searchthreads": -1, - "parallel_mode": 3, - "search_bs": 8192 - }] - query-args: | - ["nprobe=1", - "nprobe=2", - "nprobe=4", - "nprobe=8", - "nprobe=16", - "nprobe=32", - "nprobe=64", - "nprobe=128", - "nprobe=256"] -deep-1B: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/deep-1B.IVF1048576%2CSQ8.faissindex"}] - query-args: | - ["nprobe=90", - "nprobe=100", - "nprobe=110", - "nprobe=120", - "nprobe=128", - "nprobe=140", - "nprobe=160", - "nprobe=180", - "nprobe=200", - "nprobe=220"] -bigann-1B: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/bigann-1B.IVF1048576%2CSQ8.faissindex"}] - query-args: | - ["nprobe=90", - "nprobe=100", - "nprobe=110", - "nprobe=120", - "nprobe=128", - "nprobe=140", - "nprobe=160", - "nprobe=180", - "nprobe=200", - "nprobe=220"] -msspacev-1B: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/msspacev-1B.IVF1048576%2CSQ8.faissindex"}] - query-args: | - ["nprobe=1", - "nprobe=2", - "nprobe=4", - "nprobe=8", - "nprobe=16", - "nprobe=32", - "nprobe=64", - "nprobe=128", - "nprobe=256"] -text2image-1B: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/text2image-1B.IVF1048576%2CSQ8.faissindex"}] - query-args: | - ["nprobe=90", - "nprobe=100", - "nprobe=110", - "nprobe=120", - "nprobe=128", - "nprobe=140", - "nprobe=160", - "nprobe=180", - "nprobe=200", - "nprobe=220"] -msturing-10M: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/msturing-10M.IVF1048576%2CSQ8.faissindex"}] - query-args: | - ["nprobe=1", - "nprobe=2", - "nprobe=4", - "nprobe=8", - "nprobe=16", - "nprobe=32", - "nprobe=64", - "nprobe=128", - "nprobe=256"] -msturing-1B: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/msturing-1B.IVF1048576%2CSQ8.faissindex"}] - query-args: | - ["nprobe=28", - "nprobe=32", - "nprobe=37", - "nprobe=42", - "nprobe=47", - "nprobe=52", - "nprobe=57", - "nprobe=62", - "nprobe=64", - "nprobe=70"] -ssnpp-1M: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: 
FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF100_HNSW32,PQ32", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/ssnpp-1M.IVF1048576%2CSQ8.faissindex", - "parallel_mode": 2, - "add_splits": 0}] - query-args: | - ["nprobe=1", - "nprobe=2", - "nprobe=4", - "nprobe=8", - "nprobe=16", - "nprobe=32", - "nprobe=64", - "nprobe=128", - "nprobe=256"] -ssnpp-10M: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "OPQ32_128,IVF1048576_HNSW32,PQ32", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/ssnpp-10M.OPQ32_128%2CIVF1048576_HNSW32%2CPQ32.faissindex", - "parallel_mode": 2, - "add_splits": 0}] - query-args: | - ["nprobe=1", - "nprobe=2", - "nprobe=4", - "nprobe=8", - "nprobe=16", - "nprobe=32", - "nprobe=64", - "nprobe=128", - "nprobe=256"] -ssnpp-1B: - faiss-t3: - docker-tag: billion-scale-benchmark-faiss_t3 - module: benchmark.algorithms.faiss_t3 - constructor: FaissT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [{"indexkey": "IVF1048576,SQ8", - "url":"https://storage.googleapis.com/bigann/indexes/faiss/1.7.1/ssnpp-1B.IVF1048576%2CSQ8.faissindex", - "parallel_mode": 2, - "add_splits": 0}] - query-args: | - ["nprobe=1", - "nprobe=2", - "nprobe=4", - "nprobe=8", - "nprobe=16", - "nprobe=32", - "nprobe=64", - "nprobe=128", - "nprobe=256"] diff --git a/t3/faiss_t3/baseline_plots/bigann-1B-r-vs-p.png b/t3/faiss_t3/baseline_plots/bigann-1B-r-vs-p.png deleted file mode 100755 index fcbbe1470..000000000 Binary files a/t3/faiss_t3/baseline_plots/bigann-1B-r-vs-p.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/bigann-1B-r-vs-t.png b/t3/faiss_t3/baseline_plots/bigann-1B-r-vs-t.png deleted file mode 100755 index b0dac5d7f..000000000 Binary files a/t3/faiss_t3/baseline_plots/bigann-1B-r-vs-t.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/deep-1B-r-vs-p.png b/t3/faiss_t3/baseline_plots/deep-1B-r-vs-p.png deleted file mode 100755 index ea9594c98..000000000 Binary files a/t3/faiss_t3/baseline_plots/deep-1B-r-vs-p.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/deep-1B-r-vs-t.png b/t3/faiss_t3/baseline_plots/deep-1B-r-vs-t.png deleted file mode 100755 index f5d7e0265..000000000 Binary files a/t3/faiss_t3/baseline_plots/deep-1B-r-vs-t.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/msspacev-1B-r-vs-p.png b/t3/faiss_t3/baseline_plots/msspacev-1B-r-vs-p.png deleted file mode 100755 index dc25cc1b9..000000000 Binary files a/t3/faiss_t3/baseline_plots/msspacev-1B-r-vs-p.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/msspacev-1B-r-vs-t.png b/t3/faiss_t3/baseline_plots/msspacev-1B-r-vs-t.png deleted file mode 100755 index 6c69b3b2d..000000000 Binary files a/t3/faiss_t3/baseline_plots/msspacev-1B-r-vs-t.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/msturing-1B-r-vs-p.png b/t3/faiss_t3/baseline_plots/msturing-1B-r-vs-p.png deleted file mode 100755 index 80912d64e..000000000 Binary files a/t3/faiss_t3/baseline_plots/msturing-1B-r-vs-p.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/msturing-1B-r-vs-t.png b/t3/faiss_t3/baseline_plots/msturing-1B-r-vs-t.png deleted file mode 100755 index df8df2b09..000000000 Binary files a/t3/faiss_t3/baseline_plots/msturing-1B-r-vs-t.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/text2image-1B-r-vs-p.png 
b/t3/faiss_t3/baseline_plots/text2image-1B-r-vs-p.png deleted file mode 100644 index bc0aa5afe..000000000 Binary files a/t3/faiss_t3/baseline_plots/text2image-1B-r-vs-p.png and /dev/null differ diff --git a/t3/faiss_t3/baseline_plots/text2image-1B-r-vs-t.png b/t3/faiss_t3/baseline_plots/text2image-1B-r-vs-t.png deleted file mode 100644 index e0be37dbf..000000000 Binary files a/t3/faiss_t3/baseline_plots/text2image-1B-r-vs-t.png and /dev/null differ diff --git a/t3/faiss_t3/cost/AdvantechSky6200.pdf b/t3/faiss_t3/cost/AdvantechSky6200.pdf deleted file mode 100644 index c0012482d..000000000 Binary files a/t3/faiss_t3/cost/AdvantechSky6200.pdf and /dev/null differ diff --git a/t3/faiss_t3/cost/GPU.pdf b/t3/faiss_t3/cost/GPU.pdf deleted file mode 100644 index 01871eb40..000000000 Binary files a/t3/faiss_t3/cost/GPU.pdf and /dev/null differ diff --git a/t3/faiss_t3/cost/RAM.pdf b/t3/faiss_t3/cost/RAM.pdf deleted file mode 100644 index a65b66027..000000000 Binary files a/t3/faiss_t3/cost/RAM.pdf and /dev/null differ diff --git a/t3/faiss_t3/cost/SSD.pdf b/t3/faiss_t3/cost/SSD.pdf deleted file mode 100644 index 2010b2910..000000000 Binary files a/t3/faiss_t3/cost/SSD.pdf and /dev/null differ diff --git a/t3/faiss_t3/faiss-gpu_requirements.txt b/t3/faiss_t3/faiss-gpu_requirements.txt deleted file mode 100644 index 5fe6dabde..000000000 --- a/t3/faiss_t3/faiss-gpu_requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -ansicolors==1.1.8 -docker==2.6.1 -h5py==2.10.0 -matplotlib -numpy -pyyaml==5.1 -psutil==5.6.6 -scipy -scikit-learn -jinja2==2.10.1 -pandas diff --git a/t3/gemini/.gitignore b/t3/gemini/.gitignore deleted file mode 100644 index 031f4fc40..000000000 --- a/t3/gemini/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -1b -centroids_2m -records_weights diff --git a/t3/gemini/README.md b/t3/gemini/README.md deleted file mode 100644 index 19e929900..000000000 --- a/t3/gemini/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Gemini - -This README contains information required for T3 Track submissions. - -## Hardware Configuration And Cost - -|Part |Model |No. |Unit Price |Total Price| -|-----------------------------|--------------------------------------------------|----|------------------------------------|-----------| -|Chassis and Motherboard |[Advantech Sky-6200 2U](cost/AdvantechSky6200.pdf)| 1|[5572.42](cost/AdvantechSky6200.pdf)| 5572.42| -|RAM |[Advantech 64GB Memory](cost/RAM.pdf) | 24| [409.99](cost/RAM.pdf)| 9839.76| -|SSD |[2TB SeaGate](cost/SSD.pdf) | 1| [334.48](cost/SSD.pdf)| 334.48| -|APU |[LedaE APU](cost/APU.pdf) | 4| [35000.00](cost/APU.pdf)| 140000.00| -|GPU |[NVidia V100](cost/GPU.pdf) | 1| [9899.00](cost/GPU.pdf)| 9899.00| -|Total | | 1| | 165645.66| - -## Hardware Access - -This hardware is maintained by the GSI Technology, one of the competition organizers. Please send an email to big-ann-organizers@googlegroups.com or gwilliams@gsitechnology.com to get access to a system or see the section below to build your own system. 
- -## No Source Code Declarations - -This submission requires the following software components where source-code is not available and/or not part of the source-code for this submission: -* NVidia docker container runtime ( https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html ) -* CUDA 11 libraries and host drivers -* NVidia V100 firmware compatible with CUDA 11 -* Gemini Software system software and host drivers (version TBD) -* Gemini pyGSL vector search library (version TBD) -* LedaE PCIe board firmware (version TBD) - -## Hardware Setup And Software Installation - -### Prerequisites - -* Linux Ubuntu 18.04 -* Python 3.69 -* Python package requirements in [requirements.txt](requirements.txt) -* CUDA 11.0 -* The NVidia docker container runtime ( https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html ) -* Gemini system software and host drivers ( please follow the instructions that came with your Leda hardware.) -* This cloned project repository - -### Test Your Leda Hardware - -At the command line run the following diagnostic program to make sure your boards are operational: - -```dev_diagnostic --iter 1 --run-all-cards 1``` - -### pyGSL Libraries - -Download the pyGSL libraries from [here](https://storage.googleapis.com/bigann/gemini/gsl_resources.tar.gz.1) and unpack into the toplevel directory of the cloned repository. - -### Competition Index Files - -Currently the competition index files must be downloaded and installed manually. - -Download all the index files from [here](tbd) (TBD) and unpack into the cloned repo's data directory. - -## Run The Competition Algorithm - -In the top-level directory of the cloned repository, run the following command: - -```t3/gemini/run_bin_python.sh``` - -Note that it will take a few minutes for all the index files to load, so be patient. 
- diff --git a/t3/gemini/algos.yaml b/t3/gemini/algos.yaml deleted file mode 100644 index e590bf96f..000000000 --- a/t3/gemini/algos.yaml +++ /dev/null @@ -1,25 +0,0 @@ -deep-1B: - gemini-t3: - docker-tag: billion-scale-benchmark-faissconda - module: benchmark.algorithms.gemini - constructor: GeminiT3 - base-args: ["@metric"] - run-groups: - base: - args: | - [ - "{'nbits': 512, 'qbits':768, 'nlist':2097152, 'nt':83886080, 'num_apuc':3, 'f16':True }" - ] - query-args: | - [ - "{'nprobe': 800, 'nprobe_refine': 480, 'hamming_k': 5000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 800, 'nprobe_refine': 480, 'hamming_k': 2000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 800, 'nprobe_refine': 480, 'hamming_k': 1000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 800, 'nprobe_refine': 480, 'hamming_k': 500, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 800, 'nprobe_refine': 480, 'hamming_k': 250, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 800, 'nprobe_refine': 400, 'hamming_k': 1000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 800, 'nprobe_refine': 300, 'hamming_k': 1000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 700, 'nprobe_refine': 380, 'hamming_k': 1000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 600, 'nprobe_refine': 280, 'hamming_k': 1000, 'average_clstr_size_factor': 0.0}", - "{'nprobe': 500, 'nprobe_refine': 180, 'hamming_k': 1000, 'average_clstr_size_factor': 0.0}" - ] diff --git a/t3/gemini/buildidx/build_index.py b/t3/gemini/buildidx/build_index.py deleted file mode 100644 index 38c2fe16c..000000000 --- a/t3/gemini/buildidx/build_index.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -import numpy as np -import h5py -import faiss -import sys - -# -# Get parameters -# -dbase = "deep-1B" -nlist = 2097152 -qbits = 768 -nbits = 512 -nt = 83886080 -is_f16 = True -num_records = 1000000000 -output_dir = "/mnt/localdisk2/George/Projects/BigANN/gemini/indexes" - -# -# Form the path to the source files -# -resources_path = '../../t3/gsi/' -case_dir = '1b/' -resources_path_case = f'{resources_path}{case_dir}' -centroids_dirs = { 524288: 'centroids_512k/', 2097152: 'centroids_2m/', 4194304: 'centroids_4m/'} -num_centroids_dir = centroids_dirs[nlist] -fp_quantizer_file_name = f'{resources_path}{num_centroids_dir}Deep1B.nt{nt}.nlist{nlist}.quantizer' -records_encoding_file_name = f'{resources_path}records_weights/records_weights.bits{nbits}.npy' -centroids_encoding_file_name = f'{resources_path}{num_centroids_dir}centroids_weights.nt{nt}.nlist{nlist}.nbits{nbits}.npy' -index_file_name = f'{resources_path_case}Deep1B.ivfbinnh.nt{nt}.nlist{nlist}.nb{num_records}.bits{qbits}.index' -db_path = f'{resources_path_case}fdb.npy' - -# -# Validate all the source files exists -# -print('********************** Paths ***************************') -print('fp_quantizer_file_name =', fp_quantizer_file_name) -if not os.path.isfile(fp_quantizer_file_name): - raise FileNotFoundError(fp_quantizer_file_name) -print('records_encoding_file_name =', records_encoding_file_name) -if not os.path.isfile(records_encoding_file_name): - raise FileNotFoundError(records_encoding_file_name) -print('centroids_encoding_file_name =', centroids_encoding_file_name) -if not os.path.isfile(centroids_encoding_file_name): - raise FileNotFoundError(centroids_encoding_file_name) -print('index_file_name =', index_file_name) -if not os.path.isfile(index_file_name): - raise FileNotFoundError(index_file_name) -print('db_path =', db_path) -if not os.path.isfile(db_path): - raise FileNotFoundError(db_path) 
-print('********************************************************') - -# -# Form the path to the output index file -# -output_file = "%s.nbits=%d,qbits=%d,nlist=%d,nt=%d,nb=%d,fp16=%s.geminiindex" % ( dbase, nbits, qbits, nlist, nt, num_records, str(is_f16)) -full_path = os.path.join( output_dir, output_file ) -print("Checking if index file already exists (%s)" % full_path) -if os.path.exists(full_path): - raise Exception("gemini index file already exists (%s)" % full_path ) - -# -# Load the source files -# -def convert_index_to_cluster_and_ids_lists(index, nbits): - cluster_list = np.empty(index.invlists.nlist, dtype=object) - ids_list = np.empty(index.invlists.nlist, dtype=object) - - zero_count = 0 - - for i in range(index.invlists.nlist): - list_sz = index.invlists.list_size(i) - - if list_sz == 0: - zero_count = zero_count + 1 - ids = None - else: - ids_ptr = index.invlists.get_ids(i) - ids = np.array(faiss.rev_swig_ptr(ids_ptr, list_sz)).reshape(-1, 1).astype(np.uint32) # GSL requires a 2d arrray for some reason - index.invlists.release_ids(ids_ptr) - # index.invlists.release_ids(list_sz, ids_ptr) - ids_list[i] = ids - - codes_ptr = index.invlists.get_codes(i) - codes = np.array(faiss.rev_swig_ptr(codes_ptr, list_sz * nbits // 8)).reshape(list_sz, nbits//8) - index.invlists.release_codes(codes_ptr) - # index.invlists.release_codes(list_sz * nbits // 8, codes_ptr) - cluster_list[i] = codes - - print('zero_count =', zero_count) - return cluster_list, ids_list - -def get_cluster_and_ids_lists(index, nbits): - print('Creating cluster + ids lists...') - ret = convert_index_to_cluster_and_ids_lists(index, nbits) - return ret - -print("Reading binary index %s" % index_file_name) -index = faiss.read_index_binary(index_file_name) - -print("Extracting binary cluster list and ids") -# cluster_list, ids_list = get_cluster_and_ids_lists(self.index, nbits) -cluster_list, ids_list = get_cluster_and_ids_lists(index, qbits) -print(type(cluster_list), type(ids_list), cluster_list[0].shape, cluster_list[0].dtype, cluster_list[1].shape, ids_list[0].shape, ids_list[0].dtype) - -print("Extracting binary quantizer and centroids") -quantizer = faiss.downcast_IndexBinary(index.quantizer) -centroids = faiss.vector_to_array(quantizer.xb) -centroids = np.reshape(centroids, (quantizer.ntotal, quantizer.d//8)) -print('Got centroids (binary):', centroids.shape, centroids.dtype) - -print("Extracting float quantizer and centroids") -l2_quantizer = faiss.read_index(fp_quantizer_file_name) -l2_centroids = faiss.vector_float_to_array(l2_quantizer.xb) -l2_centroids = np.reshape(l2_centroids, (nlist, l2_quantizer.d)) -print('Got centroids (float):', l2_centroids.shape, l2_centroids.dtype) - -print("Reading centroids encoding file") -centroids_encoding_np = np.load(centroids_encoding_file_name) - -print("Reading records encoding file") -records_encoding_np = np.load(records_encoding_file_name) - -# -# Create the monolithic index and save -# -def add_ndarray_with_type_object( h5f, name, arr ): - print("arrs", arr[0].shape, arr[0].dtype, arr[1].shape, arr[0].dtype) - dt = h5py.vlen_dtype(arr[0].dtype) - dset = h5f.create_dataset(name, arr.shape, dtype=dt) - print("dt", dt, dset) - for i in range( arr.shape[0] ): - item = arr[i] - #print("item",i, type(item)), - if item!=None: dset[i] = item.reshape(-1) - else: dset[i] = item - -print("Creating (h5py) index file at %s" % full_path) -h5f = h5py.File(full_path, 'w') - -print("Adding cluster_list") -#h5f.create_dataset('cluster_list', data=cluster_list) 
-add_ndarray_with_type_object( h5f, "cluster_list", cluster_list ) - -print("Adding ids_list") -#h5f.create_dataset('ids_list', data=ids_list) -add_ndarray_with_type_object( h5f, "ids_list", ids_list ) - -print("Adding binary centroids") -h5f.create_dataset('centroids', data=centroids) - -print("Adding float centroids") -h5f.create_dataset('l2_centroids', data=l2_centroids) - -print("Adding centroids_encoding") -h5f.create_dataset('centroids_encoding_np', data=centroids_encoding_np) - -print("Adding records_encoding") -h5f.create_dataset('records_encoding_np', data=records_encoding_np) - -print("Adding dataset") -h5f.create_dataset(' -print("Finalizing and closing index.") -h5f.close() - -printf("Done. Wrote index at %s" % full_path ) diff --git a/t3/gemini/buildidx/htest.py b/t3/gemini/buildidx/htest.py deleted file mode 100644 index 8d6888229..000000000 --- a/t3/gemini/buildidx/htest.py +++ /dev/null @@ -1,101 +0,0 @@ -import h5py -import numpy as np -import sys - -if True: - arr = np.empty( (2,), dtype=object) - a = np.random.rand( 3,4 ) - b = np.random.rand( 2,4 ) - arr[0] = a - arr[1] = b - print("arr", arr) - - f = h5py.File('/tmp/foo.hdf5','w') - dt = h5py.vlen_dtype(a.dtype) - dset = f.create_dataset('vlen_int', arr.shape, dtype=dt) - - a = a.reshape( -1 ) - dset[0] = a - - b = b.reshape( -1 ) - dset[1] = b - - farr = dset[0:] - print(farr.shape, farr.dtype) - - narr = np.empty( (2, ), dtype=object) - for i in range(2): - narr[i] = dset[i].reshape( -1, 4 ) - - print("narr", narr) - sys.exit(0) - -if True: - f = h5py.File('/tmp/foo.hdf5','w') - dt = h5py.vlen_dtype(np.dtype('int32')) - dset = f.create_dataset('vlen_int', (2,), dtype=dt) - dset[0] = [1,2,3,4] - dset[1] = [1,2,3,4,5] - print(type(dset[0]),dset[0].dtype,dset[0].shape) - arr = dset[0] - a = arr.reshape( (2,2) ) - print("a",a, a.shape) - print(dset[0:2]) - print(np.reshape(dset[0],(None,2))) - print(dset[1]) - sys.exit(0) - -if False: - f = h5py.File('/tmp/foo.hdf5','w') - float32_t = h5py.special_dtype(vlen=np.dtype('float32')) - evolutionary_ = f.create_dataset('evolutionary', shape=(1, 3,), maxshape=(None, 3,), dtype=float32_t) - a = np.random.randn(1, 3, 4) - b = np.random.randn(1, 3, 6) - - evolutionary_[0] = a - print('evo[0] is \n', evolutionary_.value) - - evolutionary_.resize(3, axis=0) - evolutionary_[1] = b - print('evo[0,1,2] is\n', evolutionary_.value) - - sys.exit(0) - -if True: - f = h5py.File('/tmp/foo.hdf5','w') - dt = h5py.special_dtype(vlen=np.dtype('float32')) - dset = f.create_dataset('vlen_int', shape=(1,3,), maxshape=(None,3,), dtype=dt) - dset[0] = np.random.randn( 1, 3, 3 ) - dset.resize(3, axis=0) - dset[1] = np.random.randn( 1, 3, 1 ) - print( dset[0:] ) - - sys.exit(0) - -arr = np.empty(1, dtype=object) -print(type(arr), arr.shape, arr.dtype) - -a = np.ones( (2,2 )) -print(type(a), a.shape, a.dtype) -arr[0] = a - -h5f = h5py.File("/tmp/test", 'w') - -print("Adding cluster_list") -#h5f.create_dataset('cluster_list', data=arr) - -def add_ndarray_with_type_object( h5f, name, arr ): - print(type(arr), type(arr[0]), arr) - dt = arr[0].dtype - print("add", dt, arr.shape) - v = h5py.vlen_dtype(dt) - dset = h5f.create_dataset(name, arr.shape, dtype=dt) - for i in range( arr.shape[0] ): - dset[i] = arr[i] - print( type(dset) ) - print( dset ) - print( dset[0:] ) - -add_ndarray_with_type_object( h5f, "cluster_list", arr ) - -h5f.close() diff --git a/t3/gemini/buildidx/run_bin_build_index.sh b/t3/gemini/buildidx/run_bin_build_index.sh deleted file mode 100755 index 0d87d0953..000000000 --- 
a/t3/gemini/buildidx/run_bin_build_index.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -PATH=/usr/bin:$PATH python3 build_index.py diff --git a/t3/gemini/buildidx/test.py b/t3/gemini/buildidx/test.py deleted file mode 100644 index 31044945c..000000000 --- a/t3/gemini/buildidx/test.py +++ /dev/null @@ -1,63 +0,0 @@ -import faiss -import numpy as np - -qd=768 -d=512 -nlist=5 - -nb=1000 -db = np.empty((nb, d // 8), dtype='uint8') - -quantizer = faiss.IndexBinaryFlat( d ) -index = faiss.IndexBinaryIVF( quantizer, d, nlist ) -index.train(db) -index.add(db) - -def convert_index_to_cluster_and_ids_lists(index, nbits): - cluster_list = np.empty(index.invlists.nlist, dtype=object) - ids_list = np.empty(index.invlists.nlist, dtype=object) - - zero_count = 0 - - for i in range(index.invlists.nlist): - list_sz = index.invlists.list_size(i) - - if list_sz == 0: - zero_count = zero_count + 1 - ids = None - else: - ids_ptr = index.invlists.get_ids(i) - ids = np.array(faiss.rev_swig_ptr(ids_ptr, list_sz)).reshape(-1, 1).astype(np.uint32) # GSL requires a 2d arrray for some reason - index.invlists.release_ids(ids_ptr) - #GW index.invlists.release_ids(list_sz, ids_ptr) - ids_list[i] = ids - - codes_ptr = index.invlists.get_codes(i) - codes = np.array(faiss.rev_swig_ptr(codes_ptr, list_sz * nbits // 8)).reshape(list_sz, nbits//8) - index.invlists.release_codes(codes_ptr) - #GW index.invlists.release_codes(list_sz * nbits // 8, codes_ptr) - cluster_list[i] = codes - - print('zero_count =', zero_count) - return cluster_list, ids_list - -cls, ids = convert_index_to_cluster_and_ids_lists(index,d) -print("cls", cls) -print("ids", ids) - -# Querying the index -nq = 10 -queries = np.empty((nq, d // 8), dtype='uint8') -print("queries", queries) -k = 1 -D, I = index.search(queries, k) -print("di",D,I) - - -quantizer = faiss.downcast_IndexBinary(index.quantizer) -print("Quantizer", type(quantizer)) -centroids = faiss.vector_to_array(quantizer.xb) -print("Centroids", type(centroids), centroids.shape) -centroids = np.reshape(centroids, (quantizer.ntotal, quantizer.d//8)) -print("Centroids", type(centroids), centroids.shape) -print('centroids (binary):', centroids.shape, centroids.dtype) diff --git a/t3/gemini/cost/AdvantechSky6200.pdf b/t3/gemini/cost/AdvantechSky6200.pdf deleted file mode 100644 index c0012482d..000000000 Binary files a/t3/gemini/cost/AdvantechSky6200.pdf and /dev/null differ diff --git a/t3/gemini/cost/GPU.pdf b/t3/gemini/cost/GPU.pdf deleted file mode 100644 index 01871eb40..000000000 Binary files a/t3/gemini/cost/GPU.pdf and /dev/null differ diff --git a/t3/gemini/cost/RAM.pdf b/t3/gemini/cost/RAM.pdf deleted file mode 100644 index 168285fbd..000000000 Binary files a/t3/gemini/cost/RAM.pdf and /dev/null differ diff --git a/t3/gemini/cost/SSD.pdf b/t3/gemini/cost/SSD.pdf deleted file mode 100644 index 2010b2910..000000000 Binary files a/t3/gemini/cost/SSD.pdf and /dev/null differ diff --git a/t3/gemini/requirements.txt b/t3/gemini/requirements.txt deleted file mode 100644 index f62d93c6a..000000000 --- a/t3/gemini/requirements.txt +++ /dev/null @@ -1,12 +0,0 @@ -numpy==1.16.0 -scipy==1.0.0 -scikit-learn==0.19.1 -faiss==1.5.3 -docker==2.6.1 -psutil==5.6.6 -h5py==2.10.0 -ansicolors==1.1.8 -tqdm==4.62.2 -dataclasses==0.8 -pyyaml -matplotlib diff --git a/t3/gemini/run_bin_python.sh b/t3/gemini/run_bin_python.sh deleted file mode 100755 index c5a21a8ca..000000000 --- a/t3/gemini/run_bin_python.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -PATH=/usr/bin:$PATH which python3 -PATH=/usr/bin:$PATH 
python3 -c "import numpy;print('numpy',numpy.version.version)" -PATH=/usr/bin:$PATH pip3 show numpy - -PATH=/usr/bin:$PATH python3 -c "import scipy;print('scipy',scipy.version.version)" -PATH=/usr/bin:$PATH pip3 show scipy - -PATH=/usr/bin:$PATH python3 -c "import sklearn;print('sklearn',sklearn.__version__)" -PATH=/usr/bin:$PATH pip3 show sklearn - -PATH=/usr/bin:$PATH python3 -c "import faiss;print('faiss',faiss.__version__)" -PATH=/usr/bin:$PATH pip3 show faiss - -PATH=/usr/bin:$PATH LD_LIBRARY_PATH="./gsl_resources:$HOME/.local/lib/python3.6/site-packages/faiss" PYTHONPATH="./gsl_resources:$HOME/.local/lib/python3.6/site-packages/faiss" python3 run.py --t3 --nodocker --definitions t3/gemini/algos.yaml --dataset deep-1B --runs 1 - -#PATH=/usr/bin:$PATH LD_LIBRARY_PATH="/home/silo/BigANN/big-ann-benchmarks/gsl_resources:/home/silo/.local/lib/python3.6/site-packages/faiss" PYTHONPATH="/home/silo/BigANN/big-ann-benchmarks/gsl_resources:/home/silo/.local/lib/python3.6/site-packages/faiss" python3 run.py --t3 --nodocker --definitions t3/gemini/algos.yaml --dataset deep-1B --runs 1 diff --git a/t3/gemini/run_conda_python.sh b/t3/gemini/run_conda_python.sh deleted file mode 100755 index 24c7d2da3..000000000 --- a/t3/gemini/run_conda_python.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -#set -x - -#conda activate bigann-silo-py369 -which python3 -which pip3 - -python3 -c "import numpy;print('numpy',numpy.version.version)" -pip3 show numpy - -python3 -c "import scipy;print('scipy',scipy.version.version)" -pip3 show scipy - -python3 -c "import sklearn;print('sklearn',sklearn.__version__)" -pip3 show sklearn - -python3 -c "import faiss;print('faiss',faiss.__version__)" -pip3 show faiss - -LD_LIBRARY_PATH=./gsl_resources PYTHONPATH=./gsl_resources python3 run.py --t3 --nodocker --definitions t3/gemini/algos.yaml --dataset deep-1B --runs 1 diff --git a/templates/chartjs.template b/templates/chartjs.template index 466e3555b..2199afd90 100644 --- a/templates/chartjs.template +++ b/templates/chartjs.template @@ -1,102 +1,102 @@ -

{{xlabel}}/{{ylabel}}

-
- - -
- {% if args.latex %} -
-
- -
-
- - - {% endif %} +

{{xlabel}}/{{ylabel}}

+
+ + +
+ {% if args.latex %} +
+
+ +
+
+ + + {% endif %} diff --git a/templates/detail_page.html b/templates/detail_page.html index ce6193c2f..2f65c4b6e 100644 --- a/templates/detail_page.html +++ b/templates/detail_page.html @@ -1,23 +1,23 @@ -{% extends "general.html" %} - +{% extends "general.html" %} + diff --git a/templates/latex.template b/templates/latex.template index 438353418..c64643dba 100644 --- a/templates/latex.template +++ b/templates/latex.template @@ -1,30 +1,30 @@ - -\begin{figure} - \centering - \begin{tikzpicture} - \begin{axis}[ - xlabel={ {{xlabel}} }, - ylabel={ {{ylabel}} }, - ymode = log, - yticklabel style={/pgf/number format/fixed, - /pgf/number format/precision=3}, - legend style = { anchor=west}, - cycle list name = black white - ] - {% for algo in plot_data %} - {% if algo.scatter %} - \addplot [only marks] coordinates { - {% else %} - \addplot coordinates { - {% endif %} - {% for coord in algo.coords %} - ({{ coord[0]}}, {{ coord[1] }}) - {% endfor %} - }; - \addlegendentry{ {{algo.name}} }; - {% endfor %} - \end{axis} - \end{tikzpicture} - \caption{ {{caption}} } - \label{} -\end{figure} + +\begin{figure} + \centering + \begin{tikzpicture} + \begin{axis}[ + xlabel={ {{xlabel}} }, + ylabel={ {{ylabel}} }, + ymode = log, + yticklabel style={/pgf/number format/fixed, + /pgf/number format/precision=3}, + legend style = { anchor=west}, + cycle list name = black white + ] + {% for algo in plot_data %} + {% if algo.scatter %} + \addplot [only marks] coordinates { + {% else %} + \addplot coordinates { + {% endif %} + {% for coord in algo.coords %} + ({{ coord[0]}}, {{ coord[1] }}) + {% endfor %} + }; + \addlegendentry{ {{algo.name}} }; + {% endfor %} + \end{axis} + \end{tikzpicture} + \caption{ {{caption}} } + \label{} +\end{figure} diff --git a/templates/summary.html b/templates/summary.html index f83a7e1ea..271739014 100644 --- a/templates/summary.html +++ b/templates/summary.html @@ -1,14 +1,14 @@ -{% extends "general.html" %} -{% block content %} -
-

Billion-Scale Approximate Nearest Neighbor Search Challenge: NeurIPS'21 competition track

- -

- - - -{% endblock %} +{% extends "general.html" %} +{% block content %} +
+

Billion-Scale Approximate Nearest Neighbor Search Challenge: NeurIPS'21 competition track

+ +

+ + + +{% endblock %} diff --git a/tests/recall_tests.py b/tests/recall_tests.py deleted file mode 100755 index 95099a66d..000000000 --- a/tests/recall_tests.py +++ /dev/null @@ -1,243 +0,0 @@ -import numpy as np -import sys - -from benchmark.plotting.metrics import get_recall_values -from benchmark.datasets import DATASETS - -ASSERT= True # Stop unit tests on first failure - -GT_MIN_SIZE = 20 # Require ground truth with at least this length for each query - -def main_tests(): - # - # test recall computation on fake responses - # - - def test_recall( true_ids, true_dists, run_ids, count, expected_no_ties, expected_with_ties ): - '''This function will test the two forms of recall (with and without considering ties.)''' - - # compute recall, don't consider ties - recall = get_recall_values( (true_ids, true_dists), run_ids, count, False) - expected = 1.0 - print("compute recall(don't consider ties)=%f" % recall[0], "expected recall=%f" % expected_no_ties) - if ASSERT: - assert recall[0]==expected_no_ties - print("passed") - - # compute recall, consider ties - recall = get_recall_values( (true_ids, true_dists), run_ids, count, True) - expected = 1.0 - print("compute recall(consider ties)=%f num_queries_with_ties=%d" % (recall[0], recall[3]), "expected recall=%f" % expected_with_ties) - if ASSERT: - assert recall[0]==expected_with_ties - print("passed") - - print() - - print("TEST: fake query response with no distance ties, 1 query and k=3") - true_ids = np.array([ [ 0, 1, 2 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 2 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, 1.0, 1.0 ) - - print("TEST: fake query response with no distance ties but not 1.0 recall, 1 query and k=3") - true_ids = np.array([ [ 0, 1, 2 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, 2.0/3.0, 2.0/3.0 ) - - print("TEST: fake query response with no ties, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2 ], [ 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0 ], [ 0.0, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 2 ], [ 2, 1, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, 1.0, 1.0 ) - - print("TEST: fake query response with no distance ties, 1 query, k=3, GT array is larger than run array") - true_ids = np.array([ [ 0, 1, 2, 3 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0, 3.0 ] ]) - run_ids = np.array([ [ 0, 1, 2 ] ]) - count=3 - print("yuk true_ids=", true_ids.shape, "run_ids=", run_ids.shape) - test_recall( true_ids, true_dists, run_ids, count, 1.0, 1.0 ) - - print("TEST: fake query response with an out-of-bounds distance ties, 1 query, k=3, GT array is larger than run array.") - true_ids = np.array([ [ 0, 1, 2, 3 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 2 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, 1.0, 1.0 ) - - # this is from bigann GT and query set. The GT arrays are size=11 but run array is 10 and there are no ties to consider - print("TEST: from bigann-1B...") - true_ids = np.array([ [937541801, 221456167, 336118969, 971823307, 267986685, 544978851, 815975675, 615142927, 640142873, 994367459, 504814] ] ) - true_dists = np.array([ [55214., 58224., 58379., 58806., 59251., 59256., 60302., 60573., 60843., 60950., 61125.] 
] ) - run_ids = np.array([ [221456167, 336118969, 971823307, 640142873, 994367459, 504814, 87356234, 628179290, 928121617, 397551598 ] ] ) - count=10 - test_recall( true_ids, true_dists, run_ids, count, 0.5, 0.5 ) - - print("TEST: fake query response with ties at beginning, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3 ], [ 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 0.0, 1.0, 2.0 ], [ 0.0, 0.0, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ], [ 3, 2, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0,2.0])/count, np.mean([2.0,2.0])/count) - - print("TEST: fake query response with ties at beginning and ties have small diff, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3 ], [ 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 0.0+1e-6-1e-7, 1.0, 2.0 ], [ 0.0, 0.0+1e-6-1e-7, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ], [ 3, 2, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0,2.0])/count, np.mean([2.0,2.0])/count) - - print("TEST: fake query response with possible ties at beginning but diff is just beyond the 1e-6 threshold, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3 ], [ 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 0.0+1e-6, 1.0, 2.0 ], [ 0.0, 0.0+1e-6, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ], [ 3, 2, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0,2.0])/count, np.mean([2.0,2.0])/count ) - - print("TEST: fake query response with ties in middle, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3 ], [ 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 1.0, 2.0 ], [ 0.0, 1.0, 1.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ], [ 3, 2, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0,2.0])/count, np.mean([2.0,2.0])/count) - - print("TEST: fake query response with ties at count-1 and 1 tie after, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3 ], [ 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0, 2.0 ], [ 0.0, 1.0, 2.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ], [ 3, 2, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0, 2.0])/float(count), np.mean([3.0, 3.0])/float(count) ) - - print("TEST: fake query response with ties at count-1 and 1 tie after and 1 after that that is cloe, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3 ], [ 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0, 2.0 ], [ 0.0, 1.0, 2.0, 2.0 ] ]) - run_ids = np.array([ [ 0, 1, 3 ], [ 3, 2, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0, 2.0])/float(count), np.mean([3.0, 3.0])/float(count) ) - - print("TEST: fake query response with ties at count-1 and several close ties after, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3, 4, 5 ], [ 5, 4, 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 1.0, 2.0, 2.0, 2.0+1e-6-1e-7, 2.0+1e-6 ], [ 0.0, 1.0, 2.0, 2.0, 2.0+1e-6-1e-7, 2.0+1e-6 ] ]) - run_ids = np.array([ [ 0, 1, 4 ], [ 5, 4, 0 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0,2.0])/count, np.mean([3.0, 2.0])/float(count) ) - - print("TEST: fake query response with two independent tie groups, 2 queries and k=3") - true_ids = np.array([ [ 0, 1, 2, 3, 4, 5 ], [ 5, 4, 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 0.0, 2.0, 3.0, 3.0, 4.0 ], [ 0.0, 0.0, 2.0, 3.0, 3.0, 4.0 ] ]) - run_ids = np.array([ [ 0, 1, 5 ], [ 5, 4, 1 ] ]) - count=3 - test_recall( true_ids, true_dists, run_ids, count, 
np.mean([2.0, 2.0])/float(count), np.mean([2.0, 2.0])/float(count) ) - - print("TEST: fake query response with two independent tie groups, 2 queries and k=4") - true_ids = np.array([ [ 0, 1, 2, 3, 4, 5, 6 ], [ 6, 5, 4, 3, 2, 1, 0 ] ]) - true_dists = np.array([ [ 0.0, 0.0, 2.0, 3.0, 3.0, 4.0, 5.0 ], [ 0.0, 0.0, 2.0, 3.0, 3.0, 4.0, 5.0 ] ]) - run_ids = np.array([ [ 0, 1, 5, 7 ], [ 5, 4, 1, 7 ] ]) - count=4 - test_recall( true_ids, true_dists, run_ids, count, np.mean([2.0, 2.0])/float(count), np.mean([2.0, 2.0])/float(count) ) - - # - # dataset tests - # - def test_GT_monotonicity( dset, increasing=True ): - print("TEST: %s, checking GT distances monotonicity" % dset) - dataset = DATASETS[dset]() - gt = dataset.get_groundtruth() - if ASSERT: assert len(gt)==2 - true_ids = gt[0] - true_dists = gt[1] - if ASSERT: - assert true_ids.shape[1]==true_dists.shape[1] - assert true_ids.shape[1]>=GT_MIN_SIZE - assert true_dists.shape[1]>=GT_MIN_SIZE - func = monotone_increasing if increasing else monotone_decreasing - for i in range(true_dists.shape[0]): - mtest = func(true_dists[i]) - if ASSERT: assert mtest==True - print() - - print("TEST: sanity check the monotone functions") - mtest = monotone_increasing([0,1,2,3,4,5]) - if ASSERT: assert mtest==True - mtest = monotone_increasing([0,0,0,3,4,5]) - if ASSERT: assert mtest==True - mtest = monotone_increasing([3,4,5,4,3,4,5]) - if ASSERT: assert mtest==False - mtest = monotone_increasing([5,4,4,3,2,1]) - if ASSERT: assert mtest==False - print() - - # check GT dist increasing monotonicity for each knn dataset - test_GT_monotonicity( "bigann-1B" ) - test_GT_monotonicity( "deep-1B" ) - test_GT_monotonicity( "msturing-1B" ) - test_GT_monotonicity( "msspacev-1B" ) - test_GT_monotonicity( "text2image-1B", increasing=False) - - # - # test recall on actual datasets - # - def extract_GT_monotonicity( dset, row, c1, c2): - print("TEST: %s, extraction" % dset, row, c1, c2) - dataset = DATASETS[dset]() - gt = dataset.get_groundtruth() - true_dists = gt[1] - lst = true_dists[row,c1:c2] - print(lst) - mtest = monotone_increasing(lst) - print(mtest) - print() - - def test_GT_as_query( dset, count ): - print("TEST: %s, using GT as query, k=10" % dset) - dataset = DATASETS[dset]() - gt = dataset.get_groundtruth() - if ASSERT: assert len(gt)==2 - true_ids = gt[0] - true_dists = gt[1] - if ASSERT: - assert true_ids.shape[1]==true_dists.shape[1] - assert true_ids.shape[1]>=GT_MIN_SIZE - assert true_dists.shape[1]>=GT_MIN_SIZE - run_ids = np.copy( gt[0] )[:,0:count] # create a query set from GT truncated at k - test_recall( true_ids, true_dists, run_ids, count, 1.0, 1.0 ) - - # test GT as query for each dataset - test_GT_as_query( "bigann-1B", 10 ) - test_GT_as_query( "deep-1B", 10 ) - test_GT_as_query( "text2image-1B", 10 ) - test_GT_as_query( "msturing-1B", 10 ) - test_GT_as_query( "msspacev-1B", 10 ) - - sys.exit(0) - - - -# -# useful functions -# -import itertools -import operator - -def monotone_increasing(lst): - pairs = zip(lst, lst[1:]) - bools = list(itertools.starmap(operator.le, pairs)) - #print(type(bools), len(bools), bools) - return all( bools ) - -def monotone_decreasing(lst): - pairs = zip(lst, lst[1:]) - bools = list(itertools.starmap(operator.ge, pairs)) - #print(type(lst), lst) - return all( bools ) - -def monotone(lst): - return monotone_increasing(lst) or monotone_decreasing(lst) - -if __name__ == "__main__": - main_tests() diff --git a/tests/tests.sh b/tests/tests.sh deleted file mode 100755 index 2e59a08d9..000000000 --- a/tests/tests.sh +++ /dev/null 
@@ -1,6 +0,0 @@ -#!/bin/bash - -# You should run this script from the repo top-level directory - -PYTHONPATH="." python tests/recall_tests.py - diff --git a/track1_baseline_faiss/README.md b/track1_baseline_faiss/README.md deleted file mode 100644 index 090f80ed2..000000000 --- a/track1_baseline_faiss/README.md +++ /dev/null @@ -1,170 +0,0 @@ -# Running the Faiss baselines - -## Installing software - -In addition to this repository, running the baseline code requires a conda install with Faiss. - -```bash -wget https://repo.anaconda.com/archive/Anaconda3-2020.11-Linux-x86_64.sh - -bash Anaconda3-2020.11-Linux-x86_64.sh - -# follow instructions and run profile.sh to get a working conda - -conda create -n faiss_1.7.1 python=3.8 -conda activate faiss_1.7.1 -conda install -c pytorch faiss-cpu -``` - -All instructions below are supposed to be run from the root of the repository. -To make the package accessible, set `export PYTHONPATH=.` - -## Downloading the data - -To download the data (database files, query files and ground truth), do -``` -mkdir data/ # this is where all the data goes, a symlink is fine -python track1_baseline_faiss/baseline_faiss.py --dataset deep-1B --prepare -``` -The available datasets are bigann-1B deep-1B ssnpp-1B text2image-1B msturing-1B msspacev-1B. -To download the largest files, `--prepare` will use axel or azcopy. Make sure that they are in the path. - -Replace the -1B suffix with -100M or -10M to get a subset of each dataset (only the relevant fraction of the database will be downloaded). -This is useful for small-scale experiments. - -## Building the index - -There are several types of indexes in Faiss. -Here we focus on IVF variants with PQ compression as recommended [here](https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index#if-100m---1b-ivf1048576_hnsw32) and evaluated [here](https://github.com/facebookresearch/faiss/wiki/Indexing-1G-vectors#1b-datasets). - -The problem is that they require very large codebooks to define the IVF clusters. -This is fine (kind of) when a GPU is available to run the clustering, but not on CPU only. -Therefore, we perform a two-level clustering with n' = sqrt(ncentroids) first-level clusters and n' clusterings of size n' at the refined level. -Then all n' * n' sub-clusters are indexed together in an IVF_HNSW. - -This writes as: - -```bash -python -u track1_baseline_faiss/baseline_faiss.py --dataset deep-1B \ - --indexkey OPQ64_128,IVF1048576_HNSW32,PQ64x4fsr \ - --maxtrain 100000000 \ - --two_level_clustering \ - --build \ - --add_splits 30 \ - --indexfile data/track1_baseline_faiss/deep-1B.IVF1M_2level_PQ64x4fsr.faissindex \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 -``` - -This works for deep-1B bigann-1B msturing-1B msspacev-1B. - -For ssnpp-1B, the type of index has to be adjusted a bit because the Faiss PQ64x4fsr does not support range search (see [the documentation of Faiss index types](https://github.com/facebookresearch/faiss/wiki/The-index-factory#encodings) for an explanation of the difference). - -Therefore, we use a slightly slower index type: PQ32.
This gives: -```bash -python -u track1_baseline_faiss/baseline_faiss.py --dataset ssnpp-1B \ - --indexkey OPQ64_128,IVF1048576_HNSW32,PQ32 \ - --maxtrain 100000000 \ - --two_level_clustering \ - --build \ - --add_splits 30 \ - --indexfile data/track1_baseline_faiss/ssnpp-1B.IVF1M_2level_PQ32.faissindex \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 -``` - -The results on text2image-1B with the same index types are a lot worse. -This is probably due to the very lossy PQ compression. - -## Running the evaluation - -### Getting the pre-built indexes - -Pre-built indexes are available. -To download them: - -```bash -wget https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/deep-1B.IVF1M_2level_PQ64x4fsr.faissindex -P data/ -wget https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/bigann-1B.IVF1M_2level_PQ64x4fsr.faissindex -P data/ -wget https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/msturing-1B.IVF1M_2level_PQ64x4fsr.faissindex -P data/ -wget https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/msspacev-1B.IVF1M_2level_PQ64x4fsr.faissindex -P data/ - -wget https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/ssnpp-1B.IVF1M_2level_PQ32.faissindex -P data/ -wget https://dl.fbaipublicfiles.com/billion-scale-ann-benchmarks/track1_baseline_faiss/text2image-1B.IVF1M_2level_PQ32.faissindex -P data/ - -``` - - -### Running the evaluation - - - - -The evaluation proceeds by loading the index and looping over a set of search-time parameters that yield different speed-accuracy tradeoffs. - -This writes as: -```bash - -params=" -nprobe=1,quantizer_efSearch=4 -nprobe=2,quantizer_efSearch=4 -... -nprobe=512,quantizer_efSearch=256 -nprobe=512,quantizer_efSearch=512 -nprobe=1024,quantizer_efSearch=512 -" - -python track1_baseline_faiss/baseline_faiss.py \ - --dataset deep-1B --indexfile data/deep-1B.IVF1M_2level_PQ64x4fsr.faissindex \ - --search --searchparams $params - -``` - -The sets of parameters per dataset are listed in [this GIST](https://gist.github.com/mdouze/bb71032f0b3bf3cc9bdaa6ff1287c144). -They are ordered from fastest / least accurate to slowest / most accurate. - -### Results - -The results should look like: - -``` -parameters inter@ 10 time(ms/q) nb distances #runs -nprobe=1,quantizer_efSearch=4 0.1738 0.00327 12210374 92 -nprobe=2,quantizer_efSearch=4 0.2394 0.00424 24328050 71 -nprobe=2,quantizer_efSearch=8 0.2879 0.00545 24278048 56 -... -nprobe=512,quantizer_efSearch=256 0.6877 0.75883 5896044691 1 -nprobe=512,quantizer_efSearch=512 0.6886 0.77421 5890639041 1 -nprobe=1024,quantizer_efSearch=512 0.6886 1.46841 11607413418 1 -``` - -This means that with the parameters `nprobe=2,quantizer_efSearch=4` we obtain 0.2394 recall @ 10 (aka inter @ 10) for that dataset, and the search takes 0.00424 ms per query (about 236,000 QPS). -The total number of distances computed for all queries is 24328050, and this measurement was obtained in 71 runs (to reduce jitter in time measurements).
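As a small illustration (not part of the original scripts, and assuming the whitespace-separated layout shown above), each row of this output can be turned into a (recall@10, QPS) point of the kind plotted in the next section:

```python
# Hedged sketch: parse result rows like the table above into (recall@10, QPS) points.
# Column layout assumed: parameters  inter@10  time(ms/q)  nb-distances  #runs
def tradeoff_points(result_lines):
    points = []
    for line in result_lines:
        fields = line.split()
        if len(fields) < 5 or fields[0] == "parameters":
            continue  # skip the header and the "..." placeholder rows
        recall = float(fields[1])        # inter@10 column
        ms_per_query = float(fields[2])  # time(ms/q) column
        points.append((recall, 1000.0 / ms_per_query))  # QPS = 1000 / ms-per-query
    return points

# e.g. the nprobe=2,quantizer_efSearch=4 row gives roughly (0.2394, ~236000 QPS)
print(tradeoff_points(["nprobe=2,quantizer_efSearch=4 0.2394 0.00424 24328050 71"]))
```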
- - -### Plots - -The speed-accuracy tradeoff plots are here (with 32 threads on a given 2.2Ghz machine): - -![](plots/bigann-1B.png) - -![](plots/deep-1B.png) - -![](plots/msturing-1B.png) - -![](plots/msspace-1B.png) - -![](plots/ssnpp-1B.png) - -![](plots/text2image-1B.png) - - -### Determining the optimal search-time parameters - -The Pareto-optimal parameter combinations can be obtained by a random exploration of the parameter space, as described [here](https://github.com/facebookresearch/faiss/wiki/Index-IO,-cloning-and-hyper-parameter-tuning#auto-tuning-the-runtime-parameters). -To perform this operation, do: -```bash -python track1_baseline_faiss/baseline_faiss.py \ - --dataset deep-1B --indexfile data/deep-1B.IVF1M_2level_PQ64x4fsr.faissindex \ - --search -``` diff --git a/track1_baseline_faiss/__init__.py b/track1_baseline_faiss/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/track1_baseline_faiss/baseline_faiss.py b/track1_baseline_faiss/baseline_faiss.py deleted file mode 100644 index 4446401e2..000000000 --- a/track1_baseline_faiss/baseline_faiss.py +++ /dev/null @@ -1,791 +0,0 @@ -import os -import sys -import time -import pdb -import gc -import numpy as np -import faiss -import argparse -import resource - -import benchmark.datasets -from benchmark.datasets import DATASETS -from benchmark.plotting import eval_range_search - -#################################################################### -# Index building functions -#################################################################### - - -def two_level_clustering(xt, nc1, nc2, clustering_niter=25, spherical=False): - d = xt.shape[1] - - print(f"2-level clustering of {xt.shape} nb clusters = {nc1}*{nc2} = {nc1*nc2}") - print("perform coarse training") - - km = faiss.Kmeans( - d, nc1, verbose=True, niter=clustering_niter, - max_points_per_centroid=2000, - spherical=spherical - ) - km.train(xt) - - print() - - # coarse centroids - centroids1 = km.centroids - - print("assigning the training set") - t0 = time.time() - _, assign1 = km.assign(xt) - bc = np.bincount(assign1, minlength=nc1) - print(f"done in {time.time() - t0:.2f} s. 
Sizes of clusters {min(bc)}-{max(bc)}") - o = assign1.argsort() - del km - - # train sub-clusters - i0 = 0 - c2 = [] - t0 = time.time() - for c1 in range(nc1): - print(f"[{time.time() - t0:.2f} s] training sub-cluster {c1}/{nc1}\r", end="", flush=True) - i1 = i0 + bc[c1] - subset = o[i0:i1] - assert np.all(assign1[subset] == c1) - km = faiss.Kmeans(d, nc2, spherical=spherical) - xtsub = xt[subset] - km.train(xtsub) - c2.append(km.centroids) - i0 = i1 - print(f"done in {time.time() - t0:.2f} s") - return np.vstack(c2) - - -def unwind_index_ivf(index): - if isinstance(index, faiss.IndexPreTransform): - assert index.chain.size() == 1 - vt = faiss.downcast_VectorTransform(index.chain.at(0)) - index_ivf, vt2 = unwind_index_ivf(faiss.downcast_index(index.index)) - assert vt2 is None - return index_ivf, vt - if hasattr(faiss, "IndexRefine") and isinstance(index, faiss.IndexRefine): - return unwind_index_ivf(faiss.downcast_index(index.base_index)) - if isinstance(index, faiss.IndexIVF): - return index, None - else: - return None, None - - -def build_index(args, ds): - nq, d = ds.nq, ds.d - nb, d = ds.nq, ds.d - - if args.buildthreads == -1: - print("Build-time number of threads:", faiss.omp_get_max_threads()) - else: - print("Set build-time number of threads:", args.buildthreads) - faiss.omp_set_num_threads(args.buildthreads) - - metric_type = ( - faiss.METRIC_L2 if ds.distance() == "euclidean" else - faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else - 1/0 - ) - print("metric type", metric_type) - index = faiss.index_factory(d, args.indexkey, metric_type) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - else: - vec_transform = faiss.downcast_VectorTransform(vec_transform) - - if args.by_residual != -1: - by_residual = args.by_residual == 1 - print("setting by_residual = ", by_residual) - index_ivf.by_residual # check if field exists - index_ivf.by_residual = by_residual - - if index_ivf: - print("Update add-time parameters") - # adjust default parameters used at add time for quantizers - # because otherwise the assignment is inaccurate - quantizer = faiss.downcast_index(index_ivf.quantizer) - if isinstance(quantizer, faiss.IndexRefine): - print(" update quantizer k_factor=", quantizer.k_factor, end=" -> ") - quantizer.k_factor = 32 if index_ivf.nlist < 1e6 else 64 - print(quantizer.k_factor) - base_index = faiss.downcast_index(quantizer.base_index) - if isinstance(base_index, faiss.IndexIVF): - print(" update quantizer nprobe=", base_index.nprobe, end=" -> ") - base_index.nprobe = ( - 16 if base_index.nlist < 1e5 else - 32 if base_index.nlist < 4e6 else - 64) - print(base_index.nprobe) - elif isinstance(quantizer, faiss.IndexHNSW): - print(" update quantizer efSearch=", quantizer.hnsw.efSearch, end=" -> ") - if args.quantizer_add_efSearch > 0: - quantizer.hnsw.efSearch = args.quantizer_add_efSearch - else: - quantizer.hnsw.efSearch = 40 if index_ivf.nlist < 4e6 else 64 - print(quantizer.hnsw.efSearch) - if args.quantizer_efConstruction != -1: - print(" update quantizer efConstruction=", quantizer.hnsw.efConstruction, end=" -> ") - quantizer.hnsw.efConstruction = args.quantizer_efConstruction - print(quantizer.hnsw.efConstruction) - - - index.verbose = True - if index_ivf: - index_ivf.verbose = True - index_ivf.quantizer.verbose = True - index_ivf.cp.verbose = True - - - maxtrain = args.maxtrain - if maxtrain == 0: - if 'IMI' in args.indexkey: - maxtrain = int(256 * 2 ** (np.log2(index_ivf.nlist) / 2)) - elif 
index_ivf: - maxtrain = 50 * index_ivf.nlist - else: - # just guess... - maxtrain = 256 * 100 - maxtrain = max(maxtrain, 256 * 100) - print("setting maxtrain to %d" % maxtrain) - - # train on dataset - print(f"getting first {maxtrain} dataset vectors for training") - - xt2 = next(ds.get_dataset_iterator(bs=maxtrain)) - - print("train, size", xt2.shape) - assert np.all(np.isfinite(xt2)) - - t0 = time.time() - - if (isinstance(vec_transform, faiss.OPQMatrix) and - isinstance(index_ivf, faiss.IndexIVFPQFastScan)): - print(" Forcing OPQ training PQ to PQ4") - ref_pq = index_ivf.pq - training_pq = faiss.ProductQuantizer( - ref_pq.d, ref_pq.M, ref_pq.nbits - ) - vec_transform.pq - vec_transform.pq = training_pq - - if args.clustering_niter >= 0: - print(("setting nb of clustering iterations to %d" % - args.clustering_niter)) - index_ivf.cp.niter = args.clustering_niter - - train_index = None - if args.train_on_gpu: - print("add a training index on GPU") - train_index = faiss.index_cpu_to_all_gpus( - faiss.IndexFlatL2(index_ivf.d)) - index_ivf.clustering_index = train_index - - if args.two_level_clustering: - sqrt_nlist = int(np.sqrt(index_ivf.nlist)) - assert sqrt_nlist ** 2 == index_ivf.nlist - - centroids_trainset = xt2 - if isinstance(vec_transform, faiss.VectorTransform): - print(" training vector transform") - vec_transform.train(xt2) - print(" transform trainset") - centroids_trainset = vec_transform.apply_py(centroids_trainset) - - centroids = two_level_clustering( - centroids_trainset, sqrt_nlist, sqrt_nlist, - spherical=(metric_type == faiss.METRIC_INNER_PRODUCT) - ) - - if not index_ivf.quantizer.is_trained: - print(" training quantizer") - index_ivf.quantizer.train(centroids) - - print(" add centroids to quantizer") - index_ivf.quantizer.add(centroids) - - index.train(xt2) - print(" Total train time %.3f s" % (time.time() - t0)) - - if train_index is not None: - del train_index - index_ivf.clustering_index = None - gc.collect() - - print("adding") - - t0 = time.time() - if args.add_bs == -1: - index.add(sanitize(ds.get_database())) - else: - i0 = 0 - nsplit = args.add_splits - for sno in range(nsplit): - print(f"============== SPLIT {sno}/{nsplit}") - for xblock in ds.get_dataset_iterator(bs=args.add_bs, split=(nsplit, sno)): - i1 = i0 + len(xblock) - print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % ( - i0, i1, ds.nb, time.time() - t0, - faiss.get_mem_usage_kb())) - index.add(xblock) - i0 = i1 - gc.collect() - if sno == args.stop_at_split: - print("stopping at split", sno) - break - - print(" add in %.3f s" % (time.time() - t0)) - if args.indexfile: - print("storing", args.indexfile) - faiss.write_index(index, args.indexfile) - - return index - -#################################################################### -# Evaluation functions -#################################################################### - - -def compute_inter(a, b): - nq, rank = a.shape - ninter = sum( - np.intersect1d(a[i, :rank], b[i, :rank]).size - for i in range(nq) - ) - return ninter / a.size - -def knn_search_batched(index, xq, k, bs): - D, I = [], [] - for i0 in range(0, len(xq), bs): - Di, Ii = index.search(xq[i0:i0 + bs], k) - D.append(Di) - I.append(Ii) - return np.vstack(D), np.vstack(I) - -def eval_setting_knn(index, xq, gt, k=0, inter=False, min_time=3.0, query_bs=-1): - nq = xq.shape[0] - gt_I, gt_D = gt - - ivf_stats = faiss.cvar.indexIVF_stats - ivf_stats.reset() - nrun = 0 - t0 = time.time() - while True: - if query_bs == -1: - D, I = index.search(xq, k) - else: - D, I = knn_search_batched(index, 
xq, k, query_bs) - nrun += 1 - t1 = time.time() - if t1 - t0 > min_time: - break - ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun) - - if inter: - rank = k - inter_measure = compute_inter(gt_I[:, :rank], I[:, :rank]) - print("%.4f" % inter_measure, end=' ') - else: - for rank in 1, 10, 100: - n_ok = (I[:, :rank] == gt_I[:, :1]).sum() - print("%.4f" % (n_ok / float(nq)), end=' ') - print(" %9.5f " % ms_per_query, end=' ') - - if ivf_stats.search_time == 0: - # happens for IVFPQFastScan where the stats are not logged by default - print("%12d %5.2f " % (ivf_stats.ndis / nrun, 0.0), end=' ') - else: - pc_quantizer = ivf_stats.quantization_time / ivf_stats.search_time * 100 - print("%12d %5.2f " % (ivf_stats.ndis / nrun, pc_quantizer), end=' ') - print(nrun) - -def eval_setting_range(index, xq, gt, radius=0, inter=False, min_time=3.0, query_bs=-1): - nq = xq.shape[0] - gt_nres, gt_I, gt_D = gt - gt_lims = np.zeros(nq + 1, dtype=int) - gt_lims[1:] = np.cumsum(gt_nres) - ivf_stats = faiss.cvar.indexIVF_stats - ivf_stats.reset() - nrun = 0 - t0 = time.time() - while True: - if query_bs == -1: - lims, D, I = index.range_search(xq, radius) - else: - raise NotImplemented - nrun += 1 - t1 = time.time() - if t1 - t0 > min_time: - break - ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun) - - ap = eval_range_search.compute_AP((gt_lims, gt_I, gt_D), (lims, I, D)) - print("%.4f" % ap, end=' ') - print(" %9.5f " % ms_per_query, end=' ') - - print("%12d %5d " % (ivf_stats.ndis / nrun, D.size), end=' ') - print(nrun) - - -def result_header(ds, args): - - # setup the Criterion object - if ds.search_type() == "range": - header = ( - '%-40s AP time(ms/q) nb distances nb_res #runs' % - "parameters" - ) - crit = None - elif args.inter: - print("Optimize for intersection @ ", args.k) - crit = faiss.IntersectionCriterion(ds.nq, args.k) - header = ( - '%-40s inter@%3d time(ms/q) nb distances %%quantization #runs' % - ("parameters", args.k) - ) - else: - print("Optimize for 1-recall @ 1") - crit = faiss.OneRecallAtRCriterion(ds.nq, 1) - header = ( - '%-40s R@1 R@10 R@100 time(ms/q) nb distances %%quantization #runs' % - "parameters" - ) - return header, crit - -def op_compute_bounds(ps, ops, cno): - # lower_bound_t = 0.0 - # upper_bound_perf = 1.0 - bounds = np.array([0, 1], dtype="float64") - sp = faiss.swig_ptr - for i in range(ops.all_pts.size()): - ps.update_bounds(cno, ops.all_pts.at(i), sp(bounds[1:2]), sp(bounds[0:1])) - # lower_bound_t, upper_bound_perf - return bounds[0], bounds[1] - - - -def explore_parameter_space_range(index, xq, gt, ps, radius): - """ exploration of the parameter space for range search, using the - Average Precision as criterion - """ - - n_experiments = ps.n_experiments - n_comb = ps.n_combinations() - min_time = ps.min_test_duration - verbose = ps.verbose - - gt_nres, gt_I, gt_D = gt - gt_lims = np.zeros(len(gt_nres) + 1, dtype=int) - gt_lims[1:] = np.cumsum(gt_nres) - gt = (gt_lims, gt_I, gt_D) - - ops = faiss.OperatingPoints() - - def run_1_experiment(cno): - ps.set_index_parameters(index, cno) - - nrun = 0 - t0 = time.time() - while True: - lims, D, I = index.range_search(xq, radius) - nrun += 1 - t1 = time.time() - if t1 - t0 > min_time: - break - - t_search = (t1 - t0) / nrun - perf = eval_range_search.compute_AP(gt, (lims, I, D)) - keep = ops.add(perf, t_search, ps.combination_name(cno), cno) - - return len(D), perf, t_search, nrun, keep - - if n_experiments == 0: - # means exhaustive run - for cno in range(n_comb): - nres, perf, t_search, nrun, keep = run_1_experiment(cno) - - if 
verbose: - print(" %d/%d: %s nres=%d perf=%.3f t=%.3f s %s" % ( - cno, n_comb, - ps.combination_name(cno), - nres, perf, t_search, "*" if keep else "")) - return ops - - n_experiments = min(n_experiments, n_comb) - - perm = np.zeros(n_experiments, int) - # make sure the slowest and fastest experiment are run - perm[0] = 0 - perm[1] = n_comb - 1 - rs = np.random.RandomState(1234) - perm[2:] = 1 + rs.choice(n_comb - 2, n_experiments - 2, replace=False) - - for xp, cno in enumerate(perm): - cno = int(cno) - if verbose: - print(" %d/%d: cno=%d %s " % ( - xp, n_experiments, cno, ps.combination_name(cno)), - end="", flush=True) - - # check if we can skip this experiment - lower_bound_t, upper_bound_perf = op_compute_bounds(ps, ops, cno) - - best_t = ops.t_for_perf(upper_bound_perf) - - if verbose: - print("bounds [perf<=%.3f t>=%.3f] " % ( - upper_bound_perf, lower_bound_t), - end="skip\n" if best_t <= lower_bound_t else " " - ) - if best_t <= lower_bound_t: - continue - - nres, perf, t_search, nrun, keep = run_1_experiment(cno) - - if verbose: - print(" nres %d perf %.3f t %.3f (%d %s) %s" % ( - nres, perf, t_search, nrun, - "runs" if nrun >= 2 else "run", - "*" if keep else "")) - - return ops - - -#################################################################### -# Driver functions -#################################################################### - - - -def run_experiments_searchparams(ds, index, args): - """ - Evaluate a predefined set of runtime parameters - """ - k = args.k - xq = ds.get_queries() - - nq = len(xq) - - ps = faiss.ParameterSpace() - ps.initialize(index) - - header, _ = result_header(ds, args) - - searchparams = args.searchparams - - print(f"Running evaluation on {len(searchparams)} searchparams") - print(header) - maxw = max(max(len(p) for p in searchparams), 40) - for params in searchparams: - ps.set_index_parameters(index, params) - - print(params.ljust(maxw), end=' ') - sys.stdout.flush() - - if ds.search_type() == "knn": - eval_setting_knn( - index, xq, ds.get_groundtruth(k=args.k), - k=args.k, - inter=args.inter, min_time=args.min_test_duration, - query_bs=args.query_bs - ) - else: - eval_setting_range( - index, xq, ds.get_groundtruth(k=args.k), - radius=args.radius, - inter=args.inter, min_time=args.min_test_duration, - query_bs=args.query_bs - ) - - -def run_experiments_autotune(ds, index, args): - """ Explore the space of parameters and keep Pareto-optimal ones. 
""" - k = args.k - - xq = ds.get_queries() - nq = len(xq) - - ps = faiss.ParameterSpace() - ps.initialize(index) - - ps.n_experiments = args.n_autotune - ps.min_test_duration = args.min_test_duration - - for kv in args.autotune_max: - k, vmax = kv.split(':') - vmax = float(vmax) - print("limiting %s to %g" % (k, vmax)) - pr = ps.add_range(k) - values = faiss.vector_to_array(pr.values) - values = np.array([v for v in values if v < vmax]) - faiss.copy_array_to_vector(values, pr.values) - - for kv in args.autotune_range: - k, vals = kv.split(':') - vals = np.fromstring(vals, sep=',') - print("setting %s to %s" % (k, vals)) - pr = ps.add_range(k) - faiss.copy_array_to_vector(vals, pr.values) - - header, crit = result_header(ds, args) - - # then we let Faiss find the optimal parameters by itself - print("exploring operating points, %d threads" % faiss.omp_get_max_threads()); - ps.display() - - t0 = time.time() - - if ds.search_type() == "knn": - # by default, the criterion will request only 1 NN - crit.nnn = args.k - gt_I, gt_D = ds.get_groundtruth(k=args.k) - crit.set_groundtruth(None, gt_I.astype('int64')) - op = ps.explore(index, xq, crit) - elif ds.search_type() == "range": - op = explore_parameter_space_range( - index, xq, ds.get_groundtruth(), ps, args.radius - ) - else: - assert False - - print("Done in %.3f s, available OPs:" % (time.time() - t0)) - op.display() - - print("Re-running evaluation on selected OPs") - print(header) - opv = op.optimal_pts - maxw = max(max(len(opv.at(i).key) for i in range(opv.size())), 40) - for i in range(opv.size()): - opt = opv.at(i) - - ps.set_index_parameters(index, opt.key) - - print(opt.key.ljust(maxw), end=' ') - sys.stdout.flush() - if ds.search_type() == "knn": - eval_setting_knn( - index, xq, ds.get_groundtruth(k=args.k), - k=args.k, - inter=args.inter, min_time=args.min_test_duration - ) - else: - eval_setting_range( - index, xq, ds.get_groundtruth(k=args.k), - radius=args.radius, - inter=args.inter, min_time=args.min_test_duration - ) - - -class DatasetWrapInPairwiseQuantization: - - def __init__(self, ds, C): - self.ds = ds - self.C = C - self.Cq = np.linalg.inv(C.T) - # xb_pw = np.ascontiguousarray((C @ xb.T).T) - # xq_pw = np.ascontiguousarray((Cq @ xq.T).T) - # copy fields - - for name in "nb d nq dtype distance search_type get_groundtruth".split(): - setattr(self, name, getattr(ds, name)) - - def get_dataset(self): - return self.ds.get_dataset() @ self.C.T - - def get_queries(self): - return self.ds.get_queries() @ self.Cq.T - - def get_dataset_iterator(self, bs=512, split=(1,0)): - for xb in self.ds.get_dataset_iterator(bs=bs, split=split): - yield xb @ self.C.T - - -#################################################################### -# Main -#################################################################### - - -def main(): - - parser = argparse.ArgumentParser() - - def aa(*args, **kwargs): - group.add_argument(*args, **kwargs) - - group = parser.add_argument_group('What to do') - aa('--build', default=False, action="store_true") - aa('--search', default=False, action="store_true") - aa('--prepare', default=False, action="store_true", - help="call prepare() to download the dataset before computing") - - group = parser.add_argument_group('dataset options') - aa('--dataset', choices=DATASETS.keys(), required=True) - aa('--basedir', help="override basedir for dataset") - aa('--pairwise_quantization', default="", - help="load/store pairwise quantization matrix") - aa('--query_bs', default=-1, type=int, - help='perform queries in batches of 
this size') - - group = parser.add_argument_group('index construction') - - aa('--indexkey', default='HNSW32', help='index_factory type') - aa('--by_residual', default=-1, type=int, - help="set if index should use residuals (default=unchanged)") - aa('--M0', default=-1, type=int, help='size of base level') - aa('--maxtrain', default=0, type=int, - help='maximum number of training points (0 to set automatically)') - aa('--indexfile', default='', help='file to read or write index from') - aa('--add_bs', default=100000, type=int, - help='add elements index by batches of this size') - aa('--add_splits', default=1, type=int, - help="Do adds in this many splits (otherwise risk of OOM for large datasets)") - aa('--stop_at_split', default=-1, type=int, - help="stop at this split (for debugging)") - - aa('--no_precomputed_tables', action='store_true', default=False, - help='disable precomputed tables (uses less memory)') - aa('--clustering_niter', default=-1, type=int, - help='number of clustering iterations (-1 = leave default)') - aa('--two_level_clustering', action="store_true", default=False, - help='perform a 2-level tree clustering') - aa('--train_on_gpu', default=False, action='store_true', - help='do training on GPU') - aa('--quantizer_efConstruction', default=-1, type=int, - help="override the efClustering of the quantizer") - aa('--quantizer_add_efSearch', default=-1, type=int, - help="override the efSearch of the quantizer at add time") - aa('--buildthreads', default=-1, type=int, - help='nb of threads to use at build time') - - group = parser.add_argument_group('searching') - - aa('--k', default=10, type=int, help='nb of nearest neighbors') - aa('--radius', default=96237, type=float, help='radius for range search') - aa('--inter', default=True, action='store_true', - help='use intersection measure instead of 1-recall as metric') - aa('--searchthreads', default=-1, type=int, - help='nb of threads to use at search time') - aa('--searchparams', nargs='+', default=['autotune'], - help="search parameters to use (can be autotune or a list of params)") - aa('--n_autotune', default=500, type=int, - help="max nb of autotune experiments") - aa('--autotune_max', default=[], nargs='*', - help='set max value for autotune variables format "var:val" (exclusive)') - aa('--autotune_range', default=[], nargs='*', - help='set complete autotune range, format "var:val1,val2,..."') - aa('--min_test_duration', default=3.0, type=float, - help='run test at least for so long to avoid jitter') - aa('--parallel_mode', default=-1, type=int, - help="set search-time parallel mode for IVF indexes") - - group = parser.add_argument_group('computation options') - aa("--maxRAM", default=-1, type=int, help="set max RSS in GB (avoid OOM crash)") - - - args = parser.parse_args() - - print("args=", args) - - if args.basedir: - print("setting datasets basedir to", args.basedir) - benchmark.datasets.BASEDIR - benchmark.datasets.BASEDIR = args.basedir - - if args.maxRAM > 0: - print("setting max RSS to", args.maxRAM, "GiB") - resource.setrlimit( - resource.RLIMIT_DATA, (args.maxRAM * 1024 ** 3, resource.RLIM_INFINITY) - ) - - os.system('echo -n "nb processors "; ' - 'cat /proc/cpuinfo | grep ^processor | wc -l; ' - 'cat /proc/cpuinfo | grep ^"model name" | tail -1') - - ds = DATASETS[args.dataset]() - print(ds) - - nq, d = ds.nq, ds.d - nb, d = ds.nq, ds.d - - if args.prepare: - print("downloading dataset...") - ds.prepare() - print("dataset ready") - - if not (args.build or args.search): - return - - if 
args.pairwise_quantization: - if os.path.exists(args.pairwise_quantization): - print("loading pairwise quantization matrix", args.pairwise_quantization) - C = np.load(args.pairwise_quantization) - else: - print("training pairwise quantization") - xq_train = ds.get_query_train() - G = xq_train.T @ xq_train - C = np.linalg.cholesky(G).T - print("store matrix in", args.pairwise_quantization) - np.save(args.pairwise_quantization, C) - # Cq = np.linalg.inv(C.T) - # xb_pw = np.ascontiguousarray((C @ xb.T).T) - # xq_pw = np.ascontiguousarray((Cq @ xq.T).T) - ds = DatasetWrapInPairwiseQuantization(ds, C) - - if args.build: - print("build index, key=", args.indexkey) - index = build_index(args, ds) - else: - print("reading", args.indexfile) - index = faiss.read_index(args.indexfile) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - - if index_ivf is not None: - print("imbalance_factor=", index_ivf.invlists.imbalance_factor()) - - if args.no_precomputed_tables: - if isinstance(index_ivf, faiss.IndexIVFPQ): - print("disabling precomputed table") - index_ivf.use_precomputed_table = -1 - index_ivf.precomputed_table.clear() - - if args.indexfile: - print("index size on disk: ", os.stat(args.indexfile).st_size) - - print("current RSS:", faiss.get_mem_usage_kb() * 1024) - - precomputed_table_size = 0 - if hasattr(index_ivf, 'precomputed_table'): - precomputed_table_size = index_ivf.precomputed_table.size() * 4 - - print("precomputed tables size:", precomputed_table_size) - - if args.search: - - if args.searchthreads == -1: - print("Search threads:", faiss.omp_get_max_threads()) - else: - print("Setting nb of threads to", args.searchthreads) - faiss.omp_set_num_threads(args.searchthreads) - - if args.parallel_mode != -1: - print("setting IVF parallel mode to", args.parallel_mode) - index_ivf.parallel_mode - index_ivf.parallel_mode = args.parallel_mode - - if args.searchparams == ["autotune"]: - run_experiments_autotune(ds, index, args) - else: - run_experiments_searchparams(ds, index, args) - - -if __name__ == "__main__": - main() diff --git a/track1_baseline_faiss/parse_results.py b/track1_baseline_faiss/parse_results.py deleted file mode 100644 index 48e3411ac..000000000 --- a/track1_baseline_faiss/parse_results.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -Parse log files from baseline_faiss.py - -""" -import os -import numpy as np - - - -def parse_result_file(fname): - # print fname - st = 0 - res = [] - keys = [] - stats = {} - stats['run_version'] = fname[-8] - indexkey = None - for l in open(fname): - if l.startswith("srun:"): - # looks like a crash... 
- if indexkey is None: - raise RuntimeError("instant crash") - break - elif st == 0: - if l.startswith("dataset in dimension"): - fi = l.split() - stats["d"] = int(fi[3][:-1]) - stats["nq"] = int(fi[9]) - stats["nb"] = int(fi[11]) - stats["nt"] = int(fi[13]) - if l.startswith('index size on disk:'): - stats['index_size'] = int(l.split()[-1]) - if l.startswith('current RSS:'): - stats['RSS'] = int(l.split()[-1]) - if l.startswith('precomputed tables size:'): - stats['tables_size'] = int(l.split()[-1]) - if l.startswith('Setting nb of threads to'): - stats['n_threads'] = int(l.split()[-1]) - if l.startswith(' add in'): - stats['add_time'] = float(l.split()[-2]) - if l.startswith('args:'): - args = eval(l[l.find(' '):]) - indexkey = args.indexkey - if l.startswith('build index, key='): - indexkey = l.split()[-1] - elif "time(ms/q)" in l: - # result header - if 'R@1 R@10 R@100' in l: - stats["measure"] = "recall" - stats["ranks"] = [1, 10, 100] - elif 'I@1 I@10 I@100' in l: - stats["measure"] = "inter" - stats["ranks"] = [1, 10, 100] - elif 'inter@' in l: - stats["measure"] = "inter" - fi = l.split() - if fi[1] == "inter@": - rank = int(fi[2]) - else: - rank = int(fi[1][len("inter@"):]) - stats["ranks"] = [rank] - elif 'AP' in l: - stats["measure"] = "average_precision" - else: - assert False - st = 1 - elif 'index size on disk:' in l: - stats["index_size"] = int(l.split()[-1]) - elif st == 1: - st = 2 - elif st == 2: - fi = l.split() - if l[0] == " ": - # means there are 0 parameters - fi = [""] + fi - keys.append(fi[0]) - if len(fi[1:]) > 0: - res.append([float(x) for x in fi[1:]]) - return indexkey, np.array(res), keys, stats - - -def find_latest_version(fname): - """ all log files are called - XX.a.log - XX.b.log - - Where XX is the experiment id and a, b... are versions. - The version is used when the same experiment needs to be - redone because it failed. 
This function returns the latest version - """ - assert fname.endswith(".log") - pref = fname[:-5] - lv = "" - for suf in "abcdefghijklmnopqrs": - fname = pref + suf + '.log' - if os.path.exists(fname): - lv = fname - assert lv - return lv \ No newline at end of file diff --git a/track1_baseline_faiss/plots/bigann-1B.png b/track1_baseline_faiss/plots/bigann-1B.png deleted file mode 100644 index 7a1d4a5b7..000000000 Binary files a/track1_baseline_faiss/plots/bigann-1B.png and /dev/null differ diff --git a/track1_baseline_faiss/plots/deep-1B.png b/track1_baseline_faiss/plots/deep-1B.png deleted file mode 100644 index 0c6bfc32e..000000000 Binary files a/track1_baseline_faiss/plots/deep-1B.png and /dev/null differ diff --git a/track1_baseline_faiss/plots/msspacev-1B.png b/track1_baseline_faiss/plots/msspacev-1B.png deleted file mode 100644 index a8f0c5fda..000000000 Binary files a/track1_baseline_faiss/plots/msspacev-1B.png and /dev/null differ diff --git a/track1_baseline_faiss/plots/msturing-1B.png b/track1_baseline_faiss/plots/msturing-1B.png deleted file mode 100644 index 2363edcd2..000000000 Binary files a/track1_baseline_faiss/plots/msturing-1B.png and /dev/null differ diff --git a/track1_baseline_faiss/plots/ssnpp-1B.png b/track1_baseline_faiss/plots/ssnpp-1B.png deleted file mode 100644 index 96e1b9720..000000000 Binary files a/track1_baseline_faiss/plots/ssnpp-1B.png and /dev/null differ diff --git a/track1_baseline_faiss/plots/text2image-1B.png b/track1_baseline_faiss/plots/text2image-1B.png deleted file mode 100644 index 5f7b9b302..000000000 Binary files a/track1_baseline_faiss/plots/text2image-1B.png and /dev/null differ diff --git a/track1_baseline_faiss/run_baselines.bash b/track1_baseline_faiss/run_baselines.bash deleted file mode 100644 index 62901423f..000000000 --- a/track1_baseline_faiss/run_baselines.bash +++ /dev/null @@ -1,530 +0,0 @@ -set -e - -export PYTHONPATH=. 
- - - - -function run_on () { - local sbatch_opt="$1" - shift - local name=$1 - shift - local torun=" $@ " - - if [ -e slurm_scripts/$name.sh ]; then - echo "script" slurm_scripts/$name.sh exists - exit 1 - fi - - echo -n $name " " - - echo $@ > slurm_scripts/$name.sh - - sbatch $sbatch_opt \ - -J $name -o logs/$name.log \ - --wrap "bash slurm_scripts/$name.sh" - -} - - -function run_on_1gpu () { - run_on "--gres=gpu:1 --ntasks=1 --time=30:00:00 --cpus-per-task=20 - --partition=devlab --mem=64g --nodes=1 " "$@" -} - -function run_on_1gpu_learnlab () { - run_on "--gres=gpu:1 --ntasks=1 --time=30:00:00 --cpus-per-task=20 - --partition=learnlab --mem=64g --nodes=1 " "$@" -} -function run_on_half_machine () { - run_on "--gres=gpu:1 --ntasks=1 --time=30:00:00 --cpus-per-task=40 - --partition=learnlab --mem=256g --nodes=1 " "$@" -} - -function run_on_2gpu_ram256 () { - run_on "--gres=gpu:2 --ntasks=1 --time=30:00:00 --cpus-per-task=20 - --partition=learnlab --mem=256g --nodes=1 " "$@" -} - - - -############################################################## -# Small scale experiments to evaluate effect of 2-level clustering -############################################################## - -# compare 2-level 65k clustering index and regular one - -basedir=data/track1_baseline_faiss - - -if false; then - -dsname=bigann-10M - - -run_on_1gpu $dsname.IVF65k_HNSW.a \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.faissindex \ - --indexkey PCAR64,IVF65536_HNSW32,Flat --maxtrain $((65536 * 50)) \ - --search --train_on_gpu - - -run_on_1gpu $dsname.IVF65k_2level_HNSW.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.IVF65k_2level_HNSW.faissindex \ - --indexkey PCAR64,IVF65536_HNSW32,Flat --maxtrain $((65536 * 50)) \ - --two_level_clustering \ - --search - - - - -# for efC in 50 100 200; do - -for efC in 400 800; do - -run_on_1gpu $dsname.IVF65k_HNSW_efC$efC.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.IVF65k_HNSW_efC$efC.faissindex \ - --indexkey PCAR64,IVF65536_HNSW32,Flat --maxtrain $((65536 * 50)) \ - --quantizer_efConstruction $efC \ - --build --search --train_on_gpu - -done - - - -# for efS in 20 40 80; do -for efS in 160 320; do - -name=$dsname.IVF65k_2level_HNSW_efC200_efS$efS - -run_on_1gpu $name.a \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey PCAR64,IVF65536_HNSW32,Flat --maxtrain $((65536 * 50)) \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch $efS \ - --two_level_clustering \ - --build --search - -done - - - - -############################################################## -# Experiments on scale 100M -############################################################## - -# .a: build -# .c: eval w 32 threads - -# start with least problematic datasets (no IP search, no range search) -# msspace-1B may need to redo experiments because of ties in distance computations -for dsname in bigann-100M deep-100M msturing-100M msspacev-100M; do - - for nc in 256k 1M; do - - case $nc in - 1M) ncn=$((1<<20)) ;; - 256k) ncn=$((1<<18)) ;; - esac - - name=$dsname.IVF${nc}_2level_PQ32 - - run_on_half_machine $name.c \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ32_128,IVF${ncn}_HNSW32,PQ32 \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - 
--quantizer_add_efSearch 80 \ - --two_level_clustering \ - --search --searchthreads 32 \ - --maxRAM 256 - - name=$dsname.IVF${nc}_2level_PQ64x4fsr - - run_on_half_machine $name.c \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ64_128,IVF${ncn}_HNSW32,PQ64x4fsr \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --two_level_clustering \ - --search --searchthreads 32 \ - --maxRAM 256 - - done - -done - - -############################################################## -# Experiments on scale 1B -############################################################## - -# .a: build -# .b: eval w 32 threads -# .c: redo bigann eval -# .d: with ssnpp, forgot to build... -# .f: redo t2i 64x4 (eval only) - -# start with least problematic datasets (no IP search, no range search) -# msspace-1B may need to redo experiments because of ties in distance computations - -# for dsname in bigann-1B deep-1B msturing-1B msspacev-1B; do -# for dsname in bigann-1B; do -# for dsname in ssnpp-1B; do -# for nc in 1M 4M; do - -fi - -for dsname in text2image-1B; do - - for nc in 1M; do - - case $nc in - 1M) ncn=$((1<<20)) ;; - 4M) ncn=$((1<<22)) ;; - esac - - if false ;then - - name=$dsname.IVF${nc}_2level_PQ32 - - run_on_half_machine $name.e \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ32_128,IVF${ncn}_HNSW32,PQ32 \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --two_level_clustering \ - --build --search --searchthreads 32 \ - --maxRAM 256 - - fi - name=$dsname.IVF${nc}_2level_PQ64x4fsr - - run_on_half_machine $name.g \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ64_128,IVF${ncn}_HNSW32,PQ64x4fsr \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --two_level_clustering \ - --search --searchthreads 32 \ - --maxRAM 256 --autotune_max nprobe:513 - - - done - -done - -if false; then - -# speed up construction - -dsname=ssnpp-1B -nc=1M -ncn=$((1<<20)) - -name=$dsname.IVF${nc}_2level_aefS40_PQ32 - -un_on_half_machine $name.d \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ32_128,IVF${ncn}_HNSW32,PQ32 \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 40 \ - --add_splits 30 \ - --two_level_clustering \ - --build --search --searchthreads 32 \ - --maxRAM 256 - - -# find a way to not OOM during autotune - -function ssnpp_no_OOM () { - local key=$1 - shift - dsname=ssnpp-1B - nc=1M - ncn=$((1<<20)) - - name=$dsname.IVF${nc}_2level_PQ32.search.$key - - run_on_half_machine $name.a \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.IVF${nc}_2level_PQ32.faissindex \ - --search --searchthreads 32 \ - --maxRAM 256 "$@" -} - -ssnpp_no_OOM radius75000 --radius 75000 -ssnpp_no_OOM radius80000 --radius 80000 -ssnpp_no_OOM radius60000 --radius 60000 -ssnpp_no_OOM maxNP1024 --autotune_max nprobe:1025 -ssnpp_no_OOM maxEFS256 --autotune_max quantizer_efSearch:257 - - - -############################################################## -# Experiments with 64 bytes per vector -############################################################## - -# .a: initial run and build -# .b: re-run to get more 
detailed search stats - - -for dsname in bigann-1B deep-1B msturing-1B msspacev-1B; do - nc=1M - ncn=$((1<<20)) - - name=$dsname.IVF${nc}_2level_PQ64 - - run_on_half_machine $name.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ64_128,IVF${ncn}_HNSW32,PQ64 \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --two_level_clustering \ - --search --searchthreads 32 \ - --maxRAM 256 - - name=$dsname.IVF${nc}_2level_PQ128x4fsr - - run_on_half_machine $name.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$name.faissindex \ - --indexkey OPQ128_128,IVF${ncn}_HNSW32,PQ128x4fsr \ - --maxtrain 100000000 \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --two_level_clustering \ - --search --searchthreads 32 \ - --maxRAM 256 - -done - - - -############################################################## -# 10M scale exeperiment for text2image -############################################################## - -dsname=text2image-10M - - -for nc in 16k 65k; do - - case $nc in - 16k) ncn=$((1<<14)) ;; - 65k) ncn=$((1<<16)) ;; - esac - - # baseline - key=IVF$nc - run_on_1gpu $dsname.$key.d \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey IVF${ncn},Flat --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - - # loss due to 2-level - key=IVF${nc}_2level - run_on_1gpu $dsname.$key.d \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey IVF${ncn},Flat --maxtrain $((ncn * 4 * 50)) \ - --build --search --two_level_clustering - - # loss due to HNSW - key=IVF${nc}_HNSW - run_on_1gpu $dsname.$key.d \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey IVF${ncn}_HNSW32,Flat --maxtrain $((ncn * 4 * 50)) \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --build --search --train_on_gpu - - # loss due to 2-level + HNSW - key=IVF${nc}_2level_HNSW - run_on_1gpu $dsname.$key.d \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey IVF${ncn}_HNSW32,Flat --maxtrain $((ncn * 4 * 50)) \ - --quantizer_efConstruction 200 \ - --quantizer_add_efSearch 80 \ - --build --search --two_level_clustering - -done - -# evaluate various IVF codes - -ncn=16384 - - -key=IVF16k,SQ8 -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR200,IVF16384,SQ8 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - -key=IVF16k,SQ8_nores -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR200,IVF16384,SQ8 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu --by_residual 0 - -key=IVF16k,SQ6 -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR200,IVF16384,SQ6 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - -key=IVF16k,SQ6_nores -run_on_1gpu_learnlab $dsname.$key.b \ - python -u 
track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR200,IVF16384,SQ6 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu --by_residual 0 - - -key=IVF16k,SQ8_PQ32 -run_on_1gpu_learnlab $dsname.$key.a \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey OPQ32_128,IVF16384,PQ32 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - -key=IVF16k,SQ8_PQ32_nores -run_on_1gpu_learnlab $dsname.$key.a \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey OPQ32_128,IVF16384,PQ32 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu --by_residual 0 - - -key=IVF16k,SQ4 -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR200,IVF16384,SQ4 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - -key=IVF16k,SQ4_PCAR100 -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey PCAR100,IVF16384,SQ4 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - -key=IVF16k,RR192_PQ32 -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR192,IVF16384,PQ32 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - -key=IVF16k,RR192_PQ32x12 -run_on_1gpu_learnlab $dsname.$key.b \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey RR192,IVF16384,PQ32x12 --maxtrain $((ncn * 4 * 50)) \ - --build --search --train_on_gpu - - -dsname=text2image-10M - -key=IVF16k,PQ48 -run_on_1gpu $dsname.$key.c \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey OPQ48_192,IVF16384,PQ48 --maxtrain $((65536 * 50)) \ - --search --train_on_gpu - -key=IVF16k,PQ64 -run_on_1gpu $dsname.$key.c \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey OPQ64_192,IVF16384,PQ64 --maxtrain $((65536 * 50)) \ - --search --train_on_gpu - - -dsname=text2image-10M -key=IVF16k,PQ48 -run_on_1gpu $dsname.$key.c \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey OPQ48_192,IVF16384,PQ48 --maxtrain $((65536 * 50)) \ - --search --train_on_gpu - - - -## try out additive quants - - -export PYTHONPATH=/private/home/matthijs/faiss_versions/faiss_add_quant_search/build/faiss/python/build/lib:. 
-dsname=text2image-10M -for key in IVF16384,RQ32x8_Nfloat IVF16384,RQ31x8_Nqint8 IVF16384,LSQ32x8_Nfloat IVF16384,LSQ31x8_Nqint8 ; do -run_on_1gpu $dsname.$key.g \ - python -u track1_baseline_faiss/baseline_faiss.py \ - --dataset $dsname --indexfile $basedir/$dsname.$key.faissindex \ - --indexkey $key --maxtrain $((65536 * 50)) \ - --search --build # --train_on_gpu - -done - - -############################################################## -# GPU based search (T3) -############################################################## - -basedir=data/track3_baseline_faiss -dsname=deep-1B - -#.a: run with a too tight limit in RAM -#.b: increased RAM - -key=IVF262k,PQ8 -run_on_2gpu_ram256 T3.$dsname.$key.b \ - python -u track3_baseline_faiss/gpu_baseline_faiss.py \ - --maxRAM 256 \ - --dataset $dsname --indexkey IVF$((1<<18)),SQ8 \ - --build \ - --searchparams nprobe={1,4,16,64,256,1024} \ - --train_on_gpu --quantizer_on_gpu_add \ - --indexfile $basedir/$dsname.$key.faissindex \ - --add_splits 30 \ - --search \ - --parallel_mode 3 --quantizer_on_gpu_search - - -key=IVF1M,PQ8 -run_on_2gpu_ram256 T3.$dsname.$key.b \ - python -u track3_baseline_faiss/gpu_baseline_faiss.py \ - --maxRAM 256 \ - --dataset $dsname --indexkey IVF$((1<<20)),SQ8 \ - --build \ - --searchparams nprobe={1,4,16,64,256,1024} \ - --train_on_gpu --quantizer_on_gpu_add \ - --indexfile $basedir/$dsname.$key.faissindex \ - --add_splits 30 \ - --search \ - --parallel_mode 3 --quantizer_on_gpu_search - - -fi \ No newline at end of file diff --git a/track3_baseline_faiss/README.md b/track3_baseline_faiss/README.md deleted file mode 100644 index 903e6b9fb..000000000 --- a/track3_baseline_faiss/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Running the Faiss GPU baseline - -The script here is based on the T1 baseline, so please take a look at [the Track 1 baseline](../track1_baseline_faiss/README.md) first. - -## Installing software - -See [this doc](../track1_baseline_faiss/README.md#installing-software) but instead of installing faiss-cpu, use: - -``` -conda install -c pytorch faiss-gpu cudatoolkit=10.2 -``` - -## How to use the GPU - -This script focuses on exploiting the GPU for coarse quantization. -Therefore, it is suitable for large codebooks. - -The GPU can be used in the following phases: - -- training: `--train_with_gpu` will move the training of the coarse quantizer to GPU - -- vector adding to the index: `--quantizer_on_gpu_add --` will do the adding on GPU - -- search: `--parallel_mode 3 --quantizer_on_gpu_search` will do coarse quantization on GPU at search time. - -## Building the index and searching - -The hardware environment is: 1 GPU on a machine with 768G RAM (practically unlimited). -Therefore, the approach is to do the coarse quantization on GPU and store the IVF index in RAM with mild compression (PQ8). -This means that to keep the GPU busy the number of centroids should be as large as possible. -We use 1M in the example below. -The GPU uses brute force computations to find the nearest centroids. 
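In code, the GPU-assisted add phase boils down to the loop below. This is a condensed sketch of the add path in `gpu_baseline_faiss.py` (reproduced further down in this diff), not a drop-in script: `ds` is assumed to be one of the benchmark dataset wrappers and `index` / `index_ivf` an already-trained IVF index built with `faiss.index_factory`.

```python
import faiss

# Sketch: coarse assignment on GPU, inverted-list append on CPU.
quantizer_gpu = faiss.index_cpu_to_all_gpus(index_ivf.quantizer)  # clone coarse quantizer to GPU(s)

for xblock in ds.get_dataset_iterator(bs=100000):
    # brute-force 1-NN search over the centroids, done on GPU
    _, assign = quantizer_gpu.search(xblock, 1)
    # add with precomputed list assignments, bypassing the CPU-side quantizer
    index.add_core(
        len(xblock),
        faiss.swig_ptr(xblock),
        None,                            # no explicit ids
        faiss.swig_ptr(assign.ravel()),
    )
```

This precomputed-assignment path is what `--quantizer_on_gpu_add` enables in the actual script; the real version additionally splits the add into chunks and overlaps GPU assignment with CPU adds.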
- -### 100M-scale - -The following command runs the index construction and evaluates the search performance: - -```bash -python track3_baseline_faiss/gpu_baseline_faiss.py --dataset deep-100M \ - --indexkey IVF65536,SQ8 \ - --train_on_gpu \ - --build --quantizer_on_gpu_add --add_splits 30 \ - --search \ - --searchparams nprobe={1,4,16,64,256} \ - --parallel_mode 3 --quantizer_on_gpu_search -``` - -Example logs [without GPU](https://gist.github.com/mdouze/9e000be47c499f79aaec0166365ef654) and [with GPU](https://gist.github.com/mdouze/cd14c802b924299aa2a92db6e05df857) at search time. - - -### 1B-scale - -```bash -python track3_baseline_faiss/gpu_baseline_faiss.py --dataset deep-1B \ - --indexkey IVF$((1<<18)),SQ8 \ - --train_on_gpu \ - --build --quantizer_on_gpu_add --add_splits 30 \ - --search \ - --searchparams nprobe={1,4,16,64,256} \ - --parallel_mode 3 --quantizer_on_gpu_search -``` - - -For the SSNPP dataset, please use `--parallel_mode 2` instead. - - -### Results - -Similar to the track 1 results, the GPU search results can be plotted as recall@10 vs. QPS. - -![](plots/T3_deep-1B.png) - -Caveat: the GPU run uses 20 CPU threads vs. 32 for the CPU run, and the search is actually performed on 2 GPUs. diff --git a/track3_baseline_faiss/gpu_baseline_faiss.py b/track3_baseline_faiss/gpu_baseline_faiss.py deleted file mode 100644 index a3fc2d1e8..000000000 --- a/track3_baseline_faiss/gpu_baseline_faiss.py +++ /dev/null @@ -1,574 +0,0 @@ -import os -import sys -import time -import pdb -import gc -import numpy as np -import faiss -import argparse -import resource -import threading -from multiprocessing.pool import ThreadPool - -import benchmark.datasets -from benchmark.datasets import DATASETS -from benchmark.plotting import eval_range_search - - - -def unwind_index_ivf(index): - if isinstance(index, faiss.IndexPreTransform): - assert index.chain.size() == 1 - vt = index.chain.at(0) - index_ivf, vt2 = unwind_index_ivf(faiss.downcast_index(index.index)) - assert vt2 is None - return index_ivf, vt - if hasattr(faiss, "IndexRefine") and isinstance(index, faiss.IndexRefine): - return unwind_index_ivf(faiss.downcast_index(index.base_index)) - if isinstance(index, faiss.IndexIVF): - return index, None - else: - return None, None - -def rate_limited_iter(l): - 'a thread pre-processes the next element' - pool = ThreadPool(1) - res = None - - def next_or_None(): - try: - return next(l) - except StopIteration: - return None - - while True: - res_next = pool.apply_async(next_or_None) - if res is not None: - res = res.get() - if res is None: - return - yield res - res = res_next - - -def build_index(args, ds): - nq, d = ds.nq, ds.d - nb, d = ds.nb, ds.d - - if args.buildthreads == -1: - print("Build-time number of threads:", faiss.omp_get_max_threads()) - else: - print("Set build-time number of threads:", args.buildthreads) - faiss.omp_set_num_threads(args.buildthreads) - - metric_type = ( - faiss.METRIC_L2 if ds.distance() == "euclidean" else - faiss.METRIC_INNER_PRODUCT if ds.distance() in ("ip", "angular") else - 1/0 - ) - index = faiss.index_factory(d, args.indexkey, metric_type) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - else: - vec_transform = faiss.downcast_VectorTransform(vec_transform) - - if args.by_residual != -1: - by_residual = args.by_residual == 1 - print("setting by_residual = ", by_residual) - index_ivf.by_residual # check if field exists - index_ivf.by_residual = by_residual - - if index_ivf: - print("Update 
add-time parameters") - # adjust default parameters used at add time for quantizers - # because otherwise the assignment is inaccurate - quantizer = faiss.downcast_index(index_ivf.quantizer) - if isinstance(quantizer, faiss.IndexRefine): - print(" update quantizer k_factor=", quantizer.k_factor, end=" -> ") - quantizer.k_factor = 32 if index_ivf.nlist < 1e6 else 64 - print(quantizer.k_factor) - base_index = faiss.downcast_index(quantizer.base_index) - if isinstance(base_index, faiss.IndexIVF): - print(" update quantizer nprobe=", base_index.nprobe, end=" -> ") - base_index.nprobe = ( - 16 if base_index.nlist < 1e5 else - 32 if base_index.nlist < 4e6 else - 64) - print(base_index.nprobe) - - index.verbose = True - if index_ivf: - index_ivf.verbose = True - index_ivf.quantizer.verbose = True - index_ivf.cp.verbose = True - - - maxtrain = args.maxtrain - if maxtrain == 0: - if 'IMI' in args.indexkey: - maxtrain = int(256 * 2 ** (np.log2(index_ivf.nlist) / 2)) - elif index_ivf: - maxtrain = 50 * index_ivf.nlist - else: - # just guess... - maxtrain = 256 * 100 - maxtrain = max(maxtrain, 256 * 100) - print("setting maxtrain to %d" % maxtrain) - - # train on dataset - print(f"getting first {maxtrain} dataset vectors for training") - - xt2 = next(ds.get_dataset_iterator(bs=maxtrain)) - - print("train, size", xt2.shape) - assert np.all(np.isfinite(xt2)) - - t0 = time.time() - - if (isinstance(vec_transform, faiss.OPQMatrix) and - isinstance(index_ivf, faiss.IndexIVFPQFastScan)): - print(" Forcing OPQ training PQ to PQ4") - ref_pq = index_ivf.pq - training_pq = faiss.ProductQuantizer( - ref_pq.d, ref_pq.M, ref_pq.nbits - ) - vec_transform.pq - vec_transform.pq = training_pq - - if args.clustering_niter >= 0: - print(("setting nb of clustering iterations to %d" % - args.clustering_niter)) - index_ivf.cp.niter = args.clustering_niter - - train_index = None - if args.train_on_gpu: - print("add a training index on GPU") - train_index = faiss.index_cpu_to_all_gpus( - faiss.IndexFlatL2(index_ivf.d)) - index_ivf.clustering_index = train_index - - index.train(xt2) - print(" Total train time %.3f s" % (time.time() - t0)) - - if train_index is not None: - del train_index - index_ivf.clustering_index = None - gc.collect() - - print("adding") - - t0 = time.time() - - if not args.quantizer_on_gpu_add: - i0 = 0 - for xblock in ds.get_dataset_iterator(bs=args.add_bs): - i1 = i0 + len(xblock) - print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % ( - i0, i1, ds.nb, time.time() - t0, - faiss.get_mem_usage_kb())) - index.add(xblock) - i0 = i1 - elif True: - quantizer_gpu = faiss.index_cpu_to_all_gpus(index_ivf.quantizer) - - nsplit = args.add_splits - - def produce_batches(sno): - for xblock in ds.get_dataset_iterator(bs=args.add_bs, split=(nsplit, sno)): - _, assign = quantizer_gpu.search(xblock, 1) - yield xblock, assign.ravel() - - i0 = 0 - for sno in range(nsplit): - print(f"============== SPLIT {sno}/{nsplit}") - - stage2 = rate_limited_iter(produce_batches(sno)) - for xblock, assign in stage2: - i1 = i0 + len(xblock) - print(" adding %d:%d / %d [%.3f s, RSS %d kiB] " % ( - i0, i1, ds.nb, time.time() - t0, - faiss.get_mem_usage_kb())) - index.add_core( - len(xblock), - faiss.swig_ptr(xblock), - None, - faiss.swig_ptr(assign) - ) - i0 = i1 - del quantizer_gpu - gc.collect() - - - print(" add in %.3f s" % (time.time() - t0)) - if args.indexfile: - print("storing", args.indexfile) - faiss.write_index(index, args.indexfile) - - return index - - -def compute_inter(a, b): - nq, rank = a.shape - ninter = sum( - 
np.intersect1d(a[i, :rank], b[i, :rank]).size - for i in range(nq) - ) - return ninter / a.size - - - -def eval_setting_knn(index, xq, gt, k, inter, min_time): - nq = xq.shape[0] - gt_I, gt_D = gt - ivf_stats = faiss.cvar.indexIVF_stats - ivf_stats.reset() - nrun = 0 - - t0 = time.time() - while True: - D, I = index.search(xq, k) - nrun += 1 - t1 = time.time() - if t1 - t0 > min_time: - break - ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun) - if inter: - rank = k - inter_measure = compute_inter(gt[:, :rank], I[:, :rank]) - print("%.4f" % inter_measure, end=' ') - else: - for rank in 1, 10, 100: - n_ok = (I[:, :rank] == gt[:, :1]).sum() - print("%.4f" % (n_ok / float(nq)), end=' ') - print(" %9.5f " % ms_per_query, end=' ') - - if ivf_stats.search_time == 0: - # happens for IVFPQFastScan where the stats are not logged by default - print("%12d %5.2f " % (ivf_stats.ndis / nrun, 0.0), end=' ') - else: - pc_quantizer = ivf_stats.quantization_time / ivf_stats.search_time * 100 - print("%12d %5.2f " % (ivf_stats.ndis / nrun, pc_quantizer), end=' ') - print(nrun) - - -def eval_setting_range(index, xq, gt, radius=0, inter=False, min_time=3.0, query_bs=-1): - nq = xq.shape[0] - gt_nres, gt_I, gt_D = gt - gt_lims = np.zeros(nq + 1, dtype=int) - gt_lims[1:] = np.cumsum(gt_nres) - ivf_stats = faiss.cvar.indexIVF_stats - ivf_stats.reset() - nrun = 0 - t0 = time.time() - while True: - lims, D, I = index.range_search(xq, radius) - nrun += 1 - t1 = time.time() - if t1 - t0 > min_time: - break - ms_per_query = ((t1 - t0) * 1000.0 / nq / nrun) - - ap = eval_range_search.compute_AP((gt_lims, gt_I, gt_D), (lims, I, D)) - print("%.4f" % ap, end=' ') - print(" %9.5f " % ms_per_query, end=' ') - - print("%12d %5d " % (ivf_stats.ndis / nrun, D.size), end=' ') - print(nrun) - - -class IndexQuantizerOnGPU: - """ run query quantization on GPU """ - - def __init__(self, index, search_bs): - self.search_bs = search_bs - index_ivf, vec_transform = unwind_index_ivf(index) - self.index_ivf = index_ivf - self.vec_transform = vec_transform - self.quantizer_gpu = faiss.index_cpu_to_all_gpus(self.index_ivf.quantizer) - - - def produce_batches(self, x, bs): - n = len(x) - nprobe = self.index_ivf.nprobe - ivf_stats = faiss.cvar.indexIVF_stats - for i0 in range(0, n, bs): - xblock = x[i0:i0 + bs] - t0 = time.time() - D, I = self.quantizer_gpu.search(xblock, nprobe) - ivf_stats.quantization_time += 1000 * (time.time() - t0) - yield i0, xblock, D, I - - - def search(self, x, k): - bs = self.search_bs - if self.vec_transform: - x = self.vec_transform(x) - nprobe = self.index_ivf.nprobe - n, d = x.shape - assert self.index_ivf.d == d - D = np.empty((n, k), dtype=np.float32) - I = np.empty((n, k), dtype=np.int64) - - sp = faiss.swig_ptr - stage2 = rate_limited_iter(self.produce_batches(x, bs)) - t0 = time.time() - for i0, xblock, Dc, Ic in stage2: - ni = len(xblock) - self.index_ivf.search_preassigned( - ni, faiss.swig_ptr(xblock), - k, sp(Ic), sp(Dc), - sp(D[i0:]), sp(I[i0:]), - False - ) - - return D, I - - def range_search(self, x, radius): - bs = self.search_bs - if self.vec_transform: - x = self.vec_transform(x) - nprobe = self.index_ivf.nprobe - n, d = x.shape - assert self.index_ivf.d == d - - sp = faiss.swig_ptr - rsp = faiss.rev_swig_ptr - stage2 = rate_limited_iter(self.produce_batches(x, bs)) - t0 = time.time() - all_res = [] - nres = 0 - for i0, xblock, Dc, Ic in stage2: - ni = len(xblock) - res = faiss.RangeSearchResult(ni) - - self.index_ivf.range_search_preassigned( - ni, faiss.swig_ptr(xblock), - radius, sp(Ic), 
sp(Dc), - res - ) - all_res.append((ni, res)) - lims = rsp(res.lims, ni + 1) - nres += lims[-1] - nres = int(nres) - lims = np.zeros(n + 1, int) - I = np.empty(nres, int) - D = np.empty(nres, 'float32') - - n0 = 0 - for ni, res in all_res: - lims_i = rsp(res.lims, ni + 1) - nd = int(lims_i[-1]) - Di = rsp(res.distances, nd) - Ii = rsp(res.labels, nd) - i0 = int(lims[n0]) - lims[n0: n0 + ni + 1] = lims_i + i0 - I[i0:i0 + nd] = Ii - D[i0:i0 + nd] = Di - n0 += ni - - return lims, D, I - - -def run_experiments_searchparams(ds, index, args): - k = args.k - - xq = ds.get_queries() - - nq = len(xq) - - ps = faiss.ParameterSpace() - ps.initialize(index) - - - # setup the Criterion object - if args.inter: - print("Optimize for intersection @ ", args.k) - header = ( - '%-40s inter@%3d time(ms/q) nb distances %%quantization #runs' % - ("parameters", args.k) - ) - else: - print("Optimize for 1-recall @ 1") - header = ( - '%-40s R@1 R@10 R@100 time(ms/q) nb distances %%quantization #runs' % - "parameters" - ) - - searchparams = args.searchparams - - print(f"Running evaluation on {len(searchparams)} searchparams") - print(header) - maxw = max(max(len(p) for p in searchparams), 40) - - if args.quantizer_on_gpu_search: - index_wrap = IndexQuantizerOnGPU(index, args.search_bs) - else: - index_wrap = index - - for params in searchparams: - ps.set_index_parameters(index, params) - - print(params.ljust(maxw), end=' ') - sys.stdout.flush() - - if ds.search_type() == "knn": - eval_setting_knn( - index_wrap, xq, ds.get_groundtruth(k=args.k), - k=args.k, inter=args.inter, min_time=args.min_test_duration - ) - else: - eval_setting_range( - index_wrap, xq, ds.get_groundtruth(), - radius=args.radius, inter=args.inter, - min_time=args.min_test_duration - ) - - - -def main(): - - parser = argparse.ArgumentParser() - - def aa(*args, **kwargs): - group.add_argument(*args, **kwargs) - - group = parser.add_argument_group('What to do') - aa('--build', default=False, action="store_true") - aa('--search', default=False, action="store_true") - aa('--prepare', default=False, action="store_true", - help="call prepare() to download the dataset before computing") - - group = parser.add_argument_group('dataset options') - aa('--dataset', choices=DATASETS.keys(), required=True) - aa('--basedir', help="override basedir for dataset") - - group = parser.add_argument_group('index consturction') - - aa('--indexkey', default='IVF1204,Flat', help='index_factory type') - aa('--by_residual', default=-1, type=int, - help="set if index should use residuals (default=unchanged)") - aa('--maxtrain', default=0, type=int, - help='maximum number of training points (0 to set automatically)') - aa('--indexfile', default='', help='file to read or write index from') - aa('--add_bs', default=100000, type=int, - help='add elements index by batches of this size') - aa('--no_precomputed_tables', action='store_true', default=False, - help='disable precomputed tables (uses less memory)') - aa('--clustering_niter', default=-1, type=int, - help='number of clustering iterations (-1 = leave default)') - aa('--train_on_gpu', default=False, action='store_true', - help='do training on GPU') - aa('--buildthreads', default=-1, type=int, - help='nb of threads to use at build time') - aa('--quantizer_on_gpu_add', action="store_true", default=False, - help="use GPU coarse quantizer at add time") - aa('--add_splits', default=1, type=int, - help="Do adds in this many splits (otherwise risk of OOM with GPU based adds)") - - group = parser.add_argument_group('searching') - 
- aa('--k', default=10, type=int, help='nb of nearest neighbors') - aa('--radius', default=96237, type=float, help='radius for range search') - aa('--inter', default=True, action='store_true', - help='use intersection measure instead of 1-recall as metric') - aa('--searchthreads', default=-1, type=int, - help='nb of threads to use at search time') - aa('--searchparams', nargs='+', default=['autotune'], - help="search parameters to use (can be autotune or a list of params)") - aa('--min_test_duration', default=3.0, type=float, - help='run test at least for so long to avoid jitter') - aa('--quantizer_on_gpu_search', action="store_true", default=False, - help="use GPU coarse quantizer at search time") - aa('--parallel_mode', default=-1, type=int, - help="set search-time parallel mode for IVF indexes") - aa('--search_bs', default=8192, type=int, - help='search time batch size (for GPU/CPU tiling)') - - group = parser.add_argument_group('computation options') - aa("--maxRAM", default=-1, type=int, help="set max RSS in GB (avoid OOM crash)") - - - args = parser.parse_args() - - if args.basedir: - print("setting datasets basedir to", args.basedir) - benchmark.datasets.BASEDIR - benchmark.datasets.BASEDIR = args.basedir - - if args.maxRAM > 0: - print("setting max RSS to", args.maxRAM, "GiB") - resource.setrlimit( - resource.RLIMIT_DATA, (args.maxRAM * 1024 ** 3, resource.RLIM_INFINITY) - ) - - os.system('echo -n "nb processors "; ' - 'cat /proc/cpuinfo | grep ^processor | wc -l; ' - 'cat /proc/cpuinfo | grep ^"model name" | tail -1') - - ds = DATASETS[args.dataset]() - print(ds) - - nq, d = ds.nq, ds.d - nb, d = ds.nq, ds.d - - if args.prepare: - print("downloading dataset...") - ds.prepare() - print("dataset ready") - - if not (args.build or args.search): - return - - if args.build: - print("build index, key=", args.indexkey) - index = build_index(args, ds) - else: - print("reading", args.indexfile) - index = faiss.read_index(args.indexfile) - - index_ivf, vec_transform = unwind_index_ivf(index) - if vec_transform is None: - vec_transform = lambda x: x - - if index_ivf is not None: - print("imbalance_factor=", index_ivf.invlists.imbalance_factor()) - - if args.no_precomputed_tables: - if isinstance(index_ivf, faiss.IndexIVFPQ): - print("disabling precomputed table") - index_ivf.use_precomputed_table = -1 - index_ivf.precomputed_table.clear() - - if args.indexfile: - print("index size on disk: ", os.stat(args.indexfile).st_size) - - print("current RSS:", faiss.get_mem_usage_kb() * 1024) - - precomputed_table_size = 0 - if hasattr(index_ivf, 'precomputed_table'): - precomputed_table_size = index_ivf.precomputed_table.size() * 4 - - print("precomputed tables size:", precomputed_table_size) - - if args.search: - - if args.searchthreads == -1: - print("Search threads:", faiss.omp_get_max_threads()) - else: - print("Setting nb of threads to", args.searchthreads) - faiss.omp_set_num_threads(args.searchthreads) - - if args.parallel_mode != -1: - print("setting IVF parallel mode to", args.parallel_mode) - index_ivf.parallel_mode - index_ivf.parallel_mode = args.parallel_mode - - if args.searchparams == ["autotune"]: - run_experiments_autotune(ds, index, args) - else: - run_experiments_searchparams(ds, index, args) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/track3_baseline_faiss/plots/T3_deep-1B.png b/track3_baseline_faiss/plots/T3_deep-1B.png deleted file mode 100644 index d6902d6a0..000000000 Binary files a/track3_baseline_faiss/plots/T3_deep-1B.png and /dev/null 
differ
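For reference, the search-time path enabled by `--parallel_mode 3 --quantizer_on_gpu_search`, which `gpu_baseline_faiss.py` wraps in its `IndexQuantizerOnGPU` helper, reduces to the sketch below: the queries are coarse-quantized by brute force on a GPU copy of the quantizer, and the CPU then scans only the pre-selected inverted lists. Here `index_ivf`, `xq`, `k` and `nprobe` are placeholders, and `search_preassigned` is called with the low-level SWIG signature the script relies on (recent faiss releases also expose an array-based wrapper).

```python
import numpy as np
import faiss

def search_with_gpu_quantizer(index_ivf, xq, k, nprobe=64):
    """Sketch of the --quantizer_on_gpu_search path for an IVF index.

    xq must be a float32, C-contiguous (nq, d) array.
    """
    index_ivf.nprobe = nprobe
    index_ivf.parallel_mode = 3          # search-time parallel mode the script uses with this setup
    quantizer_gpu = faiss.index_cpu_to_all_gpus(index_ivf.quantizer)

    # Nearest centroids for every query, computed by brute force on the GPU.
    Dq, Iq = quantizer_gpu.search(xq, nprobe)

    # CPU scan restricted to the pre-assigned inverted lists.
    n = len(xq)
    D = np.empty((n, k), dtype='float32')
    I = np.empty((n, k), dtype='int64')
    sp = faiss.swig_ptr
    index_ivf.search_preassigned(
        n, sp(xq), k, sp(Iq), sp(Dq), sp(D), sp(I), False)
    return D, I
```

The full script additionally processes the queries in batches and overlaps GPU quantization of the next batch with the CPU scan of the current one via `rate_limited_iter`.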