diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
index 3d344b02..3b08588c 100644
--- a/.github/workflows/cd.yml
+++ b/.github/workflows/cd.yml
@@ -12,27 +12,27 @@ jobs:
uses: actions/checkout@v4
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@v1.3.1
- # - name: 'CI docker base'
- # uses: ./.github/docker-base-action
- # with:
- # base_tag: pytorch2.2.1-cuda12.3.1-ros2humble
- # github_token: ${{ secrets.GITHUB_TOKEN }}
- # - name: cleanup
- # run: docker system prune -a -f
- # - name: 'CI docker base'
- # uses: ./.github/docker-base-action
- # with:
- # base_tag: pytorch2.2.1-ros2humble
- # github_token: ${{ secrets.GITHUB_TOKEN }}
- # - name: cleanup
- # run: docker system prune -a -f
- # - name: 'CI docker base'
- # uses: ./.github/docker-base-action
- # with:
- # base_tag: pytorch2.2.1-cuda12.3.1
- # github_token: ${{ secrets.GITHUB_TOKEN }}
- # - name: cleanup
- # run: docker system prune -a -f
+ - name: 'CI docker base'
+ uses: ./.github/docker-base-action
+ with:
+ base_tag: pytorch2.2.1-cuda12.3.1-ros2humble
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ - name: cleanup
+ run: docker system prune -a -f
+ - name: 'CI docker base'
+ uses: ./.github/docker-base-action
+ with:
+ base_tag: pytorch2.2.1-ros2humble
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ - name: cleanup
+ run: docker system prune -a -f
+ - name: 'CI docker base'
+ uses: ./.github/docker-base-action
+ with:
+ base_tag: pytorch2.2.1-cuda12.3.1
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ - name: cleanup
+ run: docker system prune -a -f
- name: 'CI docker base'
uses: ./.github/docker-base-action
with:
diff --git a/README.md b/README.md
index b1baf804..e166d8a6 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@ See full documentation at [https://KumarRobotics.github.io/CoverageControl/](htt
Coverage control is the problem of navigating a robot swarm to collaboratively monitor features or a phenomenon of interest not known _a priori_.
The library provides a simulation environment, algorithms, and GNN-based architectures for the coverage control problem.
-
+
**Key features:**
- The core library `CoverageControlCore` is written in `C++` and `CUDA` to handle large-scale simulations
@@ -34,7 +34,7 @@ The library provides a simulation environment, algorithms, and GNN-based archite
> arXiv preprint arXiv:2401.04855 (2024).
-## Open Source Libraries Dependency
+## Acknowledgements
- [PyTorch](https://pytorch.org/)
- [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/)
- [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page)
diff --git a/doc/Doxyfile b/doc/Doxyfile
index f9bf7a50..4d9bab9c 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -3,17 +3,20 @@
PROJECT_NAME = "Coverage Control Library"
PROJECT_NUMBER = $(GIT_TAG)
+EXTRACT_STATIC = YES
USE_MDFILE_AS_MAINPAGE = doc/manual/README.md
INPUT = doc/manual/README.md \
doc/manual/ref_manual.txt \
doc/manual/installation.md \
doc/manual/quick_start.md \
- doc/manual/coverage-control.md \
doc/manual/lpac.md \
+ doc/manual/coverage-control.md \
cppsrc/core \
params \
cppsrc/main/coverage_algorithm.cpp \
- python/coverage_control
+ python/coverage_control \
+ python/scripts \
+ python/utils
EXCLUDE = doc/cppsrc/torch doc/cppsrc/main/torch
OUTPUT_DIRECTORY = doc/
LAYOUT_FILE = doc/config/DoxygenLayout.xml
@@ -22,4 +25,4 @@ IMAGE_PATH = doc/graphics
HTML_EXTRA_FILES += doc/graphics/LPAC.gif doc/graphics/coveragecontrol_global.png doc/graphics/learnable_pac.png
FILTER_PATTERNS = "*.md=python doc/bash-filter.py" *.py=doc/py-filter.sh
ALIASES += repo_owner_lower="kumarrobotics"
-ALIASES += docker_cr="ghcr.io/kumarrobotics/testdoc"
+ALIASES += docker_cr="ghcr.io/kumarrobotics/coveragecontrol"
diff --git a/doc/manual/README.md b/doc/manual/README.md
index b143893c..f688b0dc 100644
--- a/doc/manual/README.md
+++ b/doc/manual/README.md
@@ -41,7 +41,7 @@ The library provides a simulation environment, algorithms, and GNN-based archite
> arXiv preprint arXiv:2401.04855 (2024).
-## External Dependencies
+## Acknowledgements
- [PyTorch](https://pytorch.org/)
- [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/)
- [Eigen](http://eigen.tuxfamily.org/index.php?title=Main_Page)
diff --git a/doc/manual/coverage-control.md b/doc/manual/coverage-control.md
index 24a6c3db..6610ac00 100644
--- a/doc/manual/coverage-control.md
+++ b/doc/manual/coverage-control.md
@@ -1,6 +1,7 @@
-\page coverage-control-problem Problem Statement
+\page coverage-control-problem Theoretical Background
\tableofcontents
+# Coverage Control Problem
## Introduction
Coverage control is the problem of navigating a robot swarm to collaboratively monitor features or a phenomenon of interest not known _a priori_.
The goal is to provide sensor coverage based on the importance of information at each point in the environment.
@@ -63,3 +64,38 @@ In such a setting, a coverage control algorithm needs to provide the following b
Designing such decentralized algorithms is challenging and can be intractable for complex systems.
This motivates us to use a learning-based approach to design a decentralized coverage control algorithm.
The \ref lpac with GNN addresses the above challenges and provides a scalable and robust solution to the problem.
+
+------
+
+# LPAC Architecture
+
+## Navigation of Robot Swarms
+Navigating a swarm of robots through an environment to achieve a common collaborative goal is a challenging problem, especially when the sensing and communication capabilities of the robots are limited.
+These problems require systems with high-fidelity algorithms comprising three key capabilities: perception, action, and communication, which are executed in a feedback loop, i.e., the Perception-Action-Communication (PAC) loop.
+To seamlessly scale the deployment of such systems across vast environments with large robot swarms, it is imperative to consider a decentralized system wherein each robot autonomously makes decisions, drawing upon its own observations and information received from neighboring robots.
+
+## The Challenge
+Designing a navigation algorithm for a decentralized system is challenging.
+The robots perform perception and action independently, while the communication module is the only component that can facilitate robot collaboration.
+Under limited communication capabilities, the robots must decide _what_ information to communicate to their neighbors and _how_ to use the received information to take appropriate actions.
+The motivation of designing this library is to study the coverage control problem as a canonical problem for the decentralized navigation of robot swarms.
+We develop the learnable PAC (LPAC) architecture that can learn to process sensor observations, communicate relevant information, and take appropriate actions.
+
+## Architecture
+The learnable Perception-Action-Communication (LPAC) architecture is composed of three different types of neural networks, one for each module of the PAC system.
+1. In the perception module, a convolution neural network (CNN) processes localized IDF observations and generates an abstract representation.
+2. In the communication module, a GNN performs computation on the output of the perception module and the messages received from neighboring robots.
+It generates a fixed-size message to communicate with the neighbors and aggregates the received information to generate a feature vector for the action module of the robot.
+3. In the action module, a shallow multilayer perceptron (MLP) predicts the control actions of the robot based on the feature vector generated by the GNN.
+
+\htmlonly
+
+Learnable Perception-Action-Communication (LPAC) architecture:
+The three modules are executed on each robot independently, with the GNN in the communication module facilitating collaboration between robots.
+
+\endhtmlonly
+
+> [LPAC: Learnable Perception-Action-Communication Loops with Applications to Coverage Control.](https://doi.org/10.48550/arXiv.2401.04855)
+> Saurav Agarwal, Ramya Muthukrishnan, Walker Gosrich, Vijay Kumar, and Alejandro Ribeiro.
+> arXiv preprint arXiv:2401.04855 (2024).
+
diff --git a/doc/manual/installation-from-source.md b/doc/manual/installation-from-source.md
deleted file mode 100644
index 502f6a3d..00000000
--- a/doc/manual/installation-from-source.md
+++ /dev/null
@@ -1,88 +0,0 @@
-\page installation-from-source Installation From Source
-\tableofcontents
-
-## Prerequisites
-
-The following packages are required to build the library:
-```bash
-sudo apt install libboost-all-dev libgmp-dev libmpfr-dev libeigen3-dev gnuplot-nox ffmpeg
-```
-\note `gnuplot-nox` and `ffmpeg` are optional (but recommended) and only required for generating environment visualizations.
-
-Additional dependencies (generally already installed):
-```bash
-sudo apt install build-essential cmake git wget python3 python3-pip python3-venv python3-dev
-```
-
-### CUDA Support
-(Optional but recommended for GPU acceleration)
-
-The package also supports GPU acceleration using CUDA. To enable this feature, the following additional packages are required:
-- [`cmake`](https://cmake.org/download/) (version 3.24 or higher)
-- `cuda` (version 11.8 or higher, 12.1 recommended)
-
-\note On Ubuntu, latest `cmake` version can be installed from the official [Kitware APT Repository](https://apt.kitware.com/).
-
---------
-
-## Building the Core Library
-
-We will organize files in a **workspace** directory: `${CoverageControl_ws}` (e.g., ~/CoverageControl\_ws).
-
-Add the following lines to your `~/.bashrc` file.
-```bash
-export CoverageControl_ws=~/CoverageControl_ws # Change to your workspace directory
-export PATH=${CoverageControl_ws}/install/bin:$PATH
-export LD_LIBRARY_PATH=${CoverageControl_ws}/install/lib:$LD_LIBRARY_PATH
-```
-
-Clone the repository:
-```bash
-mkdir -p ${CoverageControl_ws}/src
-git clone https://github.com/KumarRobotics/CoverageControl.git \
- ${CoverageControl_ws}/src/CoverageControl
-```
-
-The primary setup script is `setup.sh` located in the root of the repository.
-```bash
-cd ${CoverageControl_ws}/src/CoverageControl
-bash setup.sh --with-deps -d ${CoverageControl_ws}
-```
-
-There are multiple options for building the library.
-
-Option | Description
---- | ---
-`-d
` | The workspace directory
-`-p` | Build and install `python` packages (See [CoverageControlTorch Python Package](#coveragecontroltorch-python-package))
-`--with-cuda` | Build with CUDA support
-`--with-deps` | Install dependencies (CGAL 5.6)
-
-
-\warning Ubuntu 22.04 (Jammy) has CGAL 5.4 (libcgal-dev) in the official repositories, which has bugs and is not compatible with the library. The package requires `CGAL 5.6`, which is installed if `--with-deps` is used. The `--with-deps` option is only required for the first build as the downloaded files will persist in the workspace installation directory (`${CoverageControl_ws}/install`).
-
---------
-
-## Python Packages
-
-The library provides two `python` packages:
-- `%CoverageControl` (bindings for the core library)
-- `%CoverageControlTorch` (classes, utilities, and scripts for training and evaluating neural network policies)
-
-These can be installed by adding the `-p` option to the `setup.sh` script:
-```bash
-cd ${CoverageControl_ws}/src/CoverageControl
-bash setup.sh -p -d ${CoverageControl_ws}
-```
-\note It is recommended that `python` bindings are built inside a virtual environment.
-
-Test the installation by running the following commands:
-```bash
-python ${CoverageControl_ws}/src/CoverageControl/python/tests/coverage_algorithm.py
-```
-
-The `CoverageControlTorch` is built on top of `pytorch` and `pytorch-geometric`. Depending of whether you have `CUDA` installed, you can use either of the following files to install the package:
-- `setup_utils/requirements.txt` (for GPU)
-- `setup_utils/requirements_cpu.txt` (for CPU)
-
-\note Please change the `torch` and `torch-geometric` versions in the file to match your CUDA version.
diff --git a/doc/manual/installation.md b/doc/manual/installation.md
index 5617005b..3f070aa3 100644
--- a/doc/manual/installation.md
+++ b/doc/manual/installation.md
@@ -1,19 +1,41 @@
\page installation Installation
\tableofcontents
-This page provides instructions for installing the Coverage Control library.
-The library can be installed using Docker or from source code.
-The Docker installation is the easiest way to get started, but the source installation provides more flexibility and control over the installation and usage of the library.
+# PyPI Installation
+The library is available on PyPI and can be installed using `pip`.
+It is recommended to install the library inside a virtual environment.
+```bash
+pip install coverage_control
+```
+
+The package depends on the following packages, which are installed as dependencies:
+- [PyTorch](https://pytorch.org/)
+- [PyTorch Geometric](https://pytorch-geometric.readthedocs.io/en/latest/)
+
+\note PyTorch and PyTorch Geometric have CPU and CUDA-specific versions. The package installs the default version (latest CUDA).
+
+We need the following optional packages for visualization and video generation:
+- `gnuplot` or `gnuplot-nox` (for visualizing environment)
+- `ffmpeg` (for generating videos)
+
+On Ubuntu, these can be installed using the following command:
+```bash
+sudo apt install gnuplot-nox ffmpeg
+```
+
+--------
# Docker Installation
+Docker images are available for the library with different configurations and versions.
+
## Prerequisites (Optional)
We will organize files in a **workspace** directory: `${CoverageControl_ws}` (e.g., ~/CoverageControl\_ws).
The workspace directory is mounted to the docker container.
-Add the following lines to your `~/.bashrc` file for convenience.
+Add the following lines to your `${HOME}/.bashrc` file for convenience.
```bash
-export CoverageControl_ws=~/CoverageControl_ws # Change to your workspace directory
+export CoverageControl_ws=${HOME}/CoverageControl_ws # Change to your workspace directory
```
\note Close and reopen the terminal for the changes to take effect.
@@ -59,35 +81,12 @@ The base image is `ghcr.io/\repo_owner_lower/coveragecontrol` with different tag
|`python2.2.1-ros2humble` | `--with-ros`|
|`python2.2.1` | None|
---------
-
-## Building and Executing
-
The library is already built and installed in the container.
-However, if you want to build it again, you can do so using the following commands.
-
-The primary setup script is `setup.sh` located in the root of the repository.
-```bash
-cd ${CoverageControl_ws}/src/CoverageControl
-bash setup.sh -p --with-cuda -d ${CoverageControl_ws}
-```
-
-There are multiple options for building the library.
+However, if you want to build it again, you can do so following the [Installation from Source](#installation-from-source) instructions (except for the prerequisites).
-Option | Description
---- | ---
-`-d [dir]` | The workspace directory
-`-p` | Build and install `python` bindings and `CoverageControlTorch` package
-`-g` | Installs globally (builds inside the workspace directory if `-d` is specified)
-`--with-cuda` | Build with CUDA support
-`--with-deps` | Install dependencies (Not required if using the docker image)
-
-**Testing:**
-```bash
-coverage_algorithm
-```
+--------
-# Installation from source
+# Installation From Source {#installation-from-source}
## Prerequisites
The following packages are required to build the library:
@@ -112,7 +111,25 @@ The package also supports GPU acceleration using CUDA. To enable this feature, t
--------
-## Building the Core Library
+## Automated Installation
+
+```bash
+pip install .
+```
+
+Testing the installation (from the root of the repository):
+
+Download the file `pytest_data.tar.gz` from the repository's release page and extract it to `python/tests/`.
+This will create a directory `python/tests/data`.
+
+Then run the following commands:
+```bash
+pip install pytest
+pytest
+```
+
+
+## Building the Core C++ Library
We will organize files in a **workspace** directory: `${CoverageControl_ws}` (e.g., ~/CoverageControl\_ws).
@@ -136,40 +153,19 @@ cd ${CoverageControl_ws}/src/CoverageControl
bash setup.sh --with-deps -d ${CoverageControl_ws}
```
+Testing the installation:
+```bash
+coverage_algorithm
+```
+
There are multiple options for building the library.
Option | Description
--- | ---
`-d ` | The workspace directory
-`-p` | Build and install `python` packages (See [CoverageControlTorch Python Package](#coveragecontroltorch-python-package))
`--with-cuda` | Build with CUDA support
-`--with-deps` | Install dependencies (CGAL 5.6)
-\warning Ubuntu 22.04 (Jammy) has CGAL 5.4 (libcgal-dev) in the official repositories, which has bugs and is not compatible with the library. The package requires `CGAL 5.6`, which is installed if `--with-deps` is used. The `--with-deps` option is only required for the first build as the downloaded files will persist in the workspace installation directory (`${CoverageControl_ws}/install`).
+\warning Ubuntu 22.04 (Jammy) has CGAL 5.4 (libcgal-dev) in the official repositories, which has bugs and is not compatible with the library. The package requires `CGAL 5.6`, which is automatically installed from the official CGAL repository through `CMake`.
--------
-
-## Python Packages
-
-The library provides two `python` packages:
-- `%CoverageControl` (bindings for the core library)
-- `%CoverageControlTorch` (classes, utilities, and scripts for training and evaluating neural network policies)
-
-These can be installed by adding the `-p` option to the `setup.sh` script:
-```bash
-cd ${CoverageControl_ws}/src/CoverageControl
-bash setup.sh -p -d ${CoverageControl_ws}
-```
-\note It is recommended that `python` bindings are built inside a virtual environment.
-
-Test the installation by running the following commands:
-```bash
-python ${CoverageControl_ws}/src/CoverageControl/python/tests/coverage_algorithm.py
-```
-
-The `CoverageControlTorch` is built on top of `pytorch` and `pytorch-geometric`. Depending of whether you have `CUDA` installed, you can use either of the following files to install the package:
-- `setup_utils/requirements.txt` (for GPU)
-- `setup_utils/requirements_cpu.txt` (for CPU)
-
-\note Please change the `torch` and `torch-geometric` versions in the file to match your CUDA version.
diff --git a/doc/manual/lpac.md b/doc/manual/lpac.md
index 089e721d..460e25dd 100644
--- a/doc/manual/lpac.md
+++ b/doc/manual/lpac.md
@@ -1,33 +1,76 @@
-\page lpac LPAC Architecture
+\page lpac LPAC Neural Network
\tableofcontents
-## Navigation of Robot Swarms
-Navigating a swarm of robots through an environment to achieve a common collaborative goal is a challenging problem, especially when the sensing and communication capabilities of the robots are limited.
-These problems require systems with high-fidelity algorithms comprising three key capabilities: perception, action, and communication, which are executed in a feedback loop, i.e., the Perception-Action-Communication (PAC) loop.
-To seamlessly scale the deployment of such systems across vast environments with large robot swarms, it is imperative to consider a decentralized system wherein each robot autonomously makes decisions, drawing upon its own observations and information received from neighboring robots.
-
-## The Challenge
-However, designing a navigation algorithm for a decentralized system is challenging.
-The robots perform perception and action independently, while the communication module is the only component that can facilitate robot collaboration.
-Under limited communication capabilities, the robots must decide _what_ information to communicate to their neighbors and _how_ to use the received information to take appropriate actions.
-The motivation of designing this library is to study the coverage control problem as a canonical problem for the decentralized navigation of robot swarms.
-We develop the learnable PAC (LPAC) architecture that can learn to process sensor observations, communicate relevant information, and take appropriate actions.
-
-## LPAC Architecture
-The learnable Perception-Action-Communication (LPAC) architecture is composed of three different types of neural networks, one for each module of the PAC system.
-1. In the perception module, a convolution neural network (CNN) processes localized IDF observations and generates an abstract representation.
-2. In the communication module, a GNN performs computation on the output of the perception module and the messages received from neighboring robots.
-It generates a fixed-size message to communicate with the neighbors and aggregates the received information to generate a feature vector for the action module of the robot.
-3. In the action module, a shallow multilayer perceptron (MLP) predicts the control actions of the robot based on the feature vector generated by the GNN.
-
-\htmlonly
-
-Learnable Perception-Action-Communication (LPAC) architecture:
-The three modules are executed on each robot independently, with the GNN in the communication module facilitating collaboration between robots.
-
-\endhtmlonly
-
-> [LPAC: Learnable Perception-Action-Communication Loops with Applications to Coverage Control.](https://doi.org/10.48550/arXiv.2401.04855)
-> Saurav Agarwal, Ramya Muthukrishnan, Walker Gosrich, Vijay Kumar, and Alejandro Ribeiro.
-> arXiv preprint arXiv:2401.04855 (2024).
+## Preliminaries
+We will organize files in a **workspace** directory: `${CoverageControl_ws}` (e.g., ~/CoverageControl\_ws).
+Download and extract the file `lpac_CoverageControl.tar.gz` to the workspace directory.
+The file can be downloaded from the repository's release page.
+```bash
+tar -xvzf lpac_CoverageControl.tar.gz -C ${CoverageControl_ws}
+```
+This will create a directory `lpac` in the workspace directory.
+The directory structure is as follows:
+```bash
+${CoverageControl_ws}/
+└── lpac/
+ ├── data/ # To store datasets
+ ├── envs/ # Environment files
+ ├── eval/ # Results of evaluation
+ ├── models/ # Trained models
+ └── params/ # Parameters for training and evaluation
+```
+
+The models folder already contains a trained LPAC model for a 1024x1024 environment with 32 robots, 32 features, and 128 communication radius.
+
+## Dataset Generation
+
+There are two classes for dataset generation, located in `python/scripts/data_generation/`
+1. `simple_data_generation.py`
+2. `data_generation.py`
+
+They are similar, except that `data_generation.py` splits the dataset into training, validation, and test sets.
+
+To generate a dataset, run the following command:
+```bash
+python python/scripts/data_generation/data_generation.py \
+ ${CoverageControl_ws}/lpac/params/data_params.toml
+```
+
+A sample `data_params.toml` file is also provided in the `params` directory of the repository.
+See the file for details on the parameters.
+The class will use a `coverage_control_params.toml` configuration file to generate environments and then use the `ClairvoyantCVT` algorithm to generate the dataset.
+
+The `simple_data_generation.py` is useful for generating a large dataset in parts and then combining them into a single dataset.
+See `python/utils/process_data.sh` and `python/utils/dataset_utils.py` for tools to process and combine datasets.
+
+## Training
+
+To train the LPAC model, run the following command:
+```bash
+python python/scripts/training/train_lpac.py \
+ ${CoverageControl_ws}/lpac/params/learning_params.toml 1024
+```
+
+The second argument is the environment size, used to normalize the input features.
+A sample `learning_params.toml` file is also provided in the `params` directory of the repository. See the file for details on the parameters.
+
+## Evaluation
+There are two scripts for evaluation located in `python/scripts/evaluators/`
+1. [eval_single_env.py](python/scripts/evaluators/eval_single_env.py)
+2. [eval.py](python/scripts/evaluators/eval.py)
+
+`eval_single_env.py` evaluates a single environment and `eval.py` evaluates multiple environments.
+
+To evaluate a trained model, run the following command:
+```bash
+python python/scripts/evaluators/eval.py \
+ ${CoverageControl_ws}/lpac/params/eval.toml
+```
+or
+```bash
+python python/scripts/evaluators/eval_single_env.py \
+ ${CoverageControl_ws}/lpac/params/eval_single.toml
+```
+
+The `eval.toml` and `eval_single.toml` files are also provided in the `params` directory of the repository.
diff --git a/doc/manual/quick_start.md b/doc/manual/quick_start.md
index 06a246d2..2cfc3a7f 100644
--- a/doc/manual/quick_start.md
+++ b/doc/manual/quick_start.md
@@ -12,11 +12,10 @@ See \ref params/coverage_control_params.toml for an example configuration file.
# Python Interface
-Import the `CoverageControl` library and the `ClairvoyantCVT` algorithm.
+Import the `coverage_control` library and the `ClairvoyantCVT` algorithm.
```python
-import sys
-import CoverageControl # Main library
-from CoverageControl import ClairvoyantCVT as CoverageAlgorithm
+import coverage_control as cc
+from coverage_control import ClairvoyantCVT as CoverageAlgorithm
```
You can choose one of the following algorithms instead of `ClairvoyantCVT`:
@@ -27,15 +26,15 @@ You can choose one of the following algorithms instead of `ClairvoyantCVT`:
Create a `CoverageControl::Parameters` object and load the configuration file:
```python
-params = CoverageControl.Parameters() # for default parameters
+params = cc.Parameters() # for default parameters
```
Create a simulation environment:
```python
-env = CoverageControl.CoverageSystem(params)
+env = cc.CoverageSystem(params)
```
-Plot the initial environment:
+Plot the initial environment (needs `gnuplot` installed):
```python
env.PlotInitMap("init_map");
```
@@ -43,7 +42,7 @@ env.PlotInitMap("init_map");
Print the initial coverage cost:
```python
init_cost = env.GetObjectiveValue()
-print("Initial Coverage cost: " + str('{:.2e}'.format(init_cost)))
+print(f"Initial Coverage cost: {init_cost:.2e}")
```
Create a controller using the `CoverageAlgorithm` and the environment:
@@ -58,20 +57,22 @@ for i in range(0, params.pEpisodeSteps):
controller.ComputeActions();
# Get actions from the controller
actions = controller.GetActions()
+
# Send actions to the environment
if env.StepActions(actions):
- print("Error in step " + str(i))
+ print(f"Error in step {i}")
break
if controller.IsConverged():
- print("Converged in step " + str(i))
+ print(f"Converged in step {i}")
break
+
```
Print improvement in cost:
```python
current_cost = env.GetObjectiveValue()
-print("Improvement %: " + str('{:.2f}'.format(100 * (init_cost - current_cost)/init_cost)))
+print(f"Improvement %: {100 * (init_cost - current_cost)/init_cost:.2f}")
```
Plot the final state of the environment:
@@ -80,7 +81,7 @@ env.PlotSystemMap("final_map");
```
-See \ref python/tests/coverage_simple.py and \ref python/tests/coverage_class.py for complete examples.
+See \ref python/scripts/coverage_env/coverage_simple.py and \ref python/scripts/coverage_env/coverage_class.py for complete examples.
---
diff --git a/params/data_params.toml b/params/data_params.toml
index 8d4eb7c3..feedabb4 100644
--- a/params/data_params.toml
+++ b/params/data_params.toml
@@ -1,5 +1,5 @@
-DataDir = "${CoverageControl_ws}/datasets/lpac" # Absolute location
-EnvironmentConfig = "${CoverageControl_ws}/datasets/lpac/coverage_control_params.toml" # Absolute location
+DataDir = "${CoverageControl_ws}/lpac" # Absolute location
+EnvironmentConfig = "${CoverageControl_ws}/lpac/params/coverage_control_params.toml" # Absolute location
# Only required for data generation using C++
# The generator requires a TorchVision JIT transformer model
diff --git a/params/eval.toml b/params/eval.toml
index f53e3b1e..81bce591 100644
--- a/params/eval.toml
+++ b/params/eval.toml
@@ -1,16 +1,16 @@
-EvalDir = "${CoverageControl_ws}/datasets/lpac/eval/" # Absolute location
-EnvironmentConfig = "${CoverageControl_ws}/datasets/lpac/coverage_control_params.toml" # Absolute location
+EvalDir = "${CoverageControl_ws}/lpac/eval/" # Absolute location
+EnvironmentConfig = "${CoverageControl_ws}/lpac/params/coverage_control_params.toml" # Absolute location
-EnvironmentDataDir = "${CoverageControl_ws}/datasets/lpac/envs/" # Absolute location
-NumEnvironments = 100
+EnvironmentDataDir = "${CoverageControl_ws}/lpac/envs/" # Absolute location
+NumEnvironments = 2
NumSteps = 600
[[Controllers]]
Name = "lpac"
Type = "Learning"
# ModelFile: "~/CoverageControl_ws/datsets/lpac/models/model_k3_1024.pt"
-ModelStateDict = "${CoverageControl_ws}/datasets/lpac/models/model_k3_1024_state_dict.pt"
-LearningParams = "${CoverageControl_ws}/datasets/lpac/learning_params.toml"
+ModelStateDict = "${CoverageControl_ws}/lpac/models/model_k3_1024_state_dict.pt"
+LearningParams = "${CoverageControl_ws}/lpac/params/learning_params.toml"
UseCommMap = true
UseCNN = true
CNNMapSize = 32
diff --git a/params/eval_single.toml b/params/eval_single.toml
index 23025619..e983b7ca 100644
--- a/params/eval_single.toml
+++ b/params/eval_single.toml
@@ -1,7 +1,7 @@
-EvalDir = "${CoverageControl_ws}/datasets/lpac/eval/" # Absolute location
-EnvironmentConfig = "${CoverageControl_ws}/datasets/lpac/coverage_control_params.toml" # Absolute location
+EvalDir = "${CoverageControl_ws}/lpac/eval/" # Absolute location
+EnvironmentConfig = "${CoverageControl_ws}/lpac/params/coverage_control_params.toml" # Absolute location
-EnvironmentDataDir = "${CoverageControl_ws}/datasets/lpac/envs/" # Absolute location
+EnvironmentDataDir = "${CoverageControl_ws}/lpac/envs/" # Absolute location
FeatureFile = "0.env" # Relative to EnvironmentDataDir
RobotPosFile = "0.pos" # Relative to EnvironmentDataDir
@@ -14,8 +14,8 @@ GenerateVideo = false # Will generate a video for each controller
Name = "lpac"
Type = "Learning"
# ModelFile: "~/CoverageControl_ws/datsets/lpac/models/model_k3_1024.pt"
-ModelStateDict = "${CoverageControl_ws}/datasets/lpac/models/model_k3_1024_state_dict.pt"
-LearningParams = "${CoverageControl_ws}/datasets/lpac/learning_params.toml"
+ModelStateDict = "${CoverageControl_ws}/lpac/models/model_k3_1024_state_dict.pt"
+LearningParams = "${CoverageControl_ws}/lpac/params/learning_params.toml"
UseCommMap = true
UseCNN = true
CNNMapSize = 32
diff --git a/params/learning_params.toml b/params/learning_params.toml
index dab4f7a4..f54ee9f8 100644
--- a/params/learning_params.toml
+++ b/params/learning_params.toml
@@ -1,4 +1,4 @@
-DataDir = "${CoverageControl_ws}/datasets/lpac/" # Absolute location
+DataDir = "${CoverageControl_ws}/lpac/" # Absolute location
GPUs = [0, 1]
NumWorkers = 4
@@ -6,12 +6,12 @@ NumWorkers = 4
# If a model is already present, it will be loaded
# Similarly, for the optimizer
[LPACModel]
-Dir = "${CoverageControl_ws}/datasets/lpac/models/"
+Dir = "${CoverageControl_ws}/lpac/models/"
Model = "model.pt"
Optimizer = "optimizer.pt"
[CNNModel]
-Dir = "${CoverageControl_ws}/datasets/lpac/models/" # Absolute location
+Dir = "${CoverageControl_ws}/lpac/models/" # Absolute location
Model = "model.pt"
Optimizer = "optimizer.pt"
diff --git a/pyproject.toml b/pyproject.toml
index 24ab55da..e439440e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,10 +26,11 @@ classifiers = [
dynamic = ["version"]
dependencies = ["numpy", "pyyaml",
'toml; python_version < "3.11"',
+ "torch", "torchvision", "torch_geometric",
]
[project.optional-dependencies]
-nn = ["torch >=2.1", "torchvision >=2.1"]
+nn = ["torch >=2.1", "torchvision >=2.1", "torch_geometric >=2.1"]
core_test = ["pytest >=6"]
test = ["pytest >=6", "torch >=2.1", "torchvision"]
@@ -95,3 +96,17 @@ repair-wheel-command = "delocate-wheel --require-archs {delocate_archs} -w {dest
[tool.pytest.ini_options]
addopts = ["--ignore=python/tests/deprecated", "-ra", "--showlocals"]
testpaths = ["python/tests"]
+
+[tool.pylint]
+py-version = "3.10"
+ignore-paths = [".*/_version.py", "cppsrc/*", "setup_utils/*", "python/coverage_control/_version.py"]
+extension-pkg-allow-list = ["coverage_control._core"]
+reports.output-format = "colorized"
+similarities.ignore-imports = "yes"
+# messages_control.disable = [
+# "design",
+# "fixme",
+# "line-too-long",
+# "missing-module-docstring",
+# "wrong-import-position",
+# ]
diff --git a/python/coverage_control/__init__.py b/python/coverage_control/__init__.py
index 7a85708f..e7e5f894 100644
--- a/python/coverage_control/__init__.py
+++ b/python/coverage_control/__init__.py
@@ -4,15 +4,12 @@
coverage_control: Library for large-scale coverage control using robot swarms
"""
-
from __future__ import annotations
-
from ._version import version as __version__
from .core import *
from .io_utils import IOUtils
-# from .nn import *
-__name__ = "coverage_control"
+# from .nn import *
-__all__ = ["__version__", "core", "nn", "IOUtils", "__name__"]
+__all__ = ["__version__", "core", "nn", "IOUtils"]
diff --git a/python/coverage_control/algorithms/__init__.py b/python/coverage_control/algorithms/__init__.py
index 609bd28b..6e0c40c7 100644
--- a/python/coverage_control/algorithms/__init__.py
+++ b/python/coverage_control/algorithms/__init__.py
@@ -1,8 +1,9 @@
+"""
+Provides CVT-based algorithms for coverage control.
+"""
from __future__ import annotations
-__name__ = "algorithms"
-
-from .._core import NearOptimalCVT, ClairvoyantCVT, CentralizedCVT, DecentralizedCVT
+from .._core import CentralizedCVT, ClairvoyantCVT, DecentralizedCVT, NearOptimalCVT
__all__ = ["NearOptimalCVT", "ClairvoyantCVT", "CentralizedCVT", "DecentralizedCVT"]
diff --git a/python/coverage_control/core/__init__.py b/python/coverage_control/core/__init__.py
index 3d458d20..23a26721 100644
--- a/python/coverage_control/core/__init__.py
+++ b/python/coverage_control/core/__init__.py
@@ -1,11 +1,39 @@
-from __future__ import annotations
+"""
+Core module for the coverage_control package.
+"""
-__name__ = "core"
+from __future__ import annotations
-from .._core import Point2, PointVector, DblVector, DblVectorVector
-from .._core import PolygonFeature, VoronoiCell, VoronoiCells
-from .._core import Parameters
-from .._core import BivariateNormalDistribution, BNDVector, WorldIDF, RobotModel, CoverageSystem
-from .._core import CudaUtils
+from .._core import (
+ BivariateNormalDistribution,
+ BNDVector,
+ CoverageSystem,
+ CudaUtils,
+ DblVector,
+ DblVectorVector,
+ Parameters,
+ Point2,
+ PointVector,
+ PolygonFeature,
+ RobotModel,
+ VoronoiCell,
+ VoronoiCells,
+ WorldIDF,
+)
-__all__ = ["Point2", "PointVector", "DblVector", "DblVectorVector", "PolygonFeature", "VoronoiCell", "VoronoiCells", "BivariateNormalDistribution", "BNDVector", "WorldIDF", "RobotModel", "CoverageSystem", "Parameters", "CudaUtils", "__name__"]
+__all__ = [
+ "Point2",
+ "PointVector",
+ "DblVector",
+ "DblVectorVector",
+ "PolygonFeature",
+ "VoronoiCell",
+ "VoronoiCells",
+ "BivariateNormalDistribution",
+ "BNDVector",
+ "WorldIDF",
+ "RobotModel",
+ "CoverageSystem",
+ "Parameters",
+ "CudaUtils",
+]
diff --git a/python/coverage_control/io_utils.py b/python/coverage_control/io_utils.py
index a10aaef0..b845a04f 100644
--- a/python/coverage_control/io_utils.py
+++ b/python/coverage_control/io_utils.py
@@ -19,15 +19,25 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+## @file io_utils.py
+# @brief The module provides utility functions for loading data from files
+
+"""
+The module provides utility functions for loading data from files
+"""
+
import os
import sys
+
+import torch
+import yaml
+
if sys.version_info[1] < 11:
import tomli as tomllib
else:
import tomllib
-import yaml
-import torch
+## @ingroup python_api
class IOUtils:
"""
Class provides the following utility functions:
@@ -38,6 +48,10 @@ class IOUtils:
@staticmethod
def sanitize_path(path_str: str) -> str:
+ """
+ Function to sanitize a path string
+ """
+
return os.path.normpath(os.path.expanduser(os.path.expandvars(path_str)))
@staticmethod
@@ -58,15 +72,18 @@ def load_tensor(path: str) -> torch.tensor:
"""
# Throw error if path does not exist
path = IOUtils.sanitize_path(path)
+
if not os.path.exists(path):
raise FileNotFoundError(f"IOUtils::load_tensor: File not found: {path}")
# Load data
data = torch.load(path)
# Extract tensor if data is in jit script format
+
if isinstance(data, torch.jit.ScriptModule):
tensor = list(data.parameters())[0]
else:
tensor = data
+
return tensor
@staticmethod
@@ -86,19 +103,26 @@ def load_yaml(path: str) -> dict:
path = IOUtils.sanitize_path(path)
# Throw error if path does not exist
+
if not os.path.exists(path):
raise FileNotFoundError(f"IOUtils::load_yaml File not found: {path}")
# Load data
- with open(path, "r") as f:
+ with open(path, "rb") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
+
return data
@staticmethod
- def load_toml(path: str) -> dict: # Throw error if path does not exist
+ def load_toml(path: str) -> dict: # Throw error if path does not exist
+ """
+ Function to load a toml file
+ """
path = IOUtils.sanitize_path(path)
+
if not os.path.exists(path):
raise FileNotFoundError(f"IOUtils::load_toml: File not found: {path}")
# Load data
with open(path, "rb") as f:
data = tomllib.load(f)
+
return data
diff --git a/python/coverage_control/nn/__init__.py b/python/coverage_control/nn/__init__.py
index a6f2d2f1..c52a6766 100644
--- a/python/coverage_control/nn/__init__.py
+++ b/python/coverage_control/nn/__init__.py
@@ -1,10 +1,22 @@
-from __future__ import annotations
+"""
+Provides neural network functionality for the coverage control problem.
+"""
-__name__ = "nn"
+from __future__ import annotations
from .data_loaders import *
from .models.cnn import CNN
from .models.lpac import LPAC
from .trainers import TrainModel
-__all__ = ["__name__", "DataLoaderUtils", "CoverageEnvUtils", "LocalMapCNNDataset", "LocalMapGNNDataset", "CNNGNNDataset", "VoronoiGNNDataset", "CNN", "LPAC", "TrainModel"]
+__all__ = [
+ "DataLoaderUtils",
+ "CoverageEnvUtils",
+ "LocalMapCNNDataset",
+ "LocalMapGNNDataset",
+ "CNNGNNDataset",
+ "VoronoiGNNDataset",
+ "CNN",
+ "LPAC",
+ "TrainModel",
+]
diff --git a/python/coverage_control/nn/data_loaders/__init__.py b/python/coverage_control/nn/data_loaders/__init__.py
index 63371229..0028b3c9 100644
--- a/python/coverage_control/nn/data_loaders/__init__.py
+++ b/python/coverage_control/nn/data_loaders/__init__.py
@@ -1,9 +1,23 @@
-from __future__ import annotations
+"""
+This module contains the data loader utilities for the coverage environment.
+"""
-__name__ = "data_loaders"
+from __future__ import annotations
-from .data_loader_utils import DataLoaderUtils
-from .loaders import LocalMapCNNDataset, LocalMapGNNDataset, CNNGNNDataset, VoronoiGNNDataset
from .coverage_env_utils import CoverageEnvUtils
+from .data_loader_utils import DataLoaderUtils
+from .loaders import (
+ CNNGNNDataset,
+ LocalMapCNNDataset,
+ LocalMapGNNDataset,
+ VoronoiGNNDataset,
+)
-__all__ = ["__name__", "DataLoaderUtils", "CoverageEnvUtils", "LocalMapCNNDataset", "LocalMapGNNDataset", "CNNGNNDataset", "VoronoiGNNDataset"]
+__all__ = [
+ "DataLoaderUtils",
+ "CoverageEnvUtils",
+ "LocalMapCNNDataset",
+ "LocalMapGNNDataset",
+ "CNNGNNDataset",
+ "VoronoiGNNDataset",
+]
diff --git a/python/coverage_control/nn/data_loaders/coverage_env_utils.py b/python/coverage_control/nn/data_loaders/coverage_env_utils.py
index ad76e39c..a0cf5579 100644
--- a/python/coverage_control/nn/data_loaders/coverage_env_utils.py
+++ b/python/coverage_control/nn/data_loaders/coverage_env_utils.py
@@ -19,29 +19,32 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+"""
+Utility functions for coverage environment
+"""
+
## @file coverage_env_utils.py
# @brief Utility functions for coverage environment
import math
+
import numpy
-import copy
+
# import cv2
import torch
-import torchvision
import torch_geometric
-from torch_geometric.data import Dataset
-from scipy.ndimage import gaussian_filter
+import torchvision
from scipy.spatial import distance_matrix
-from ...core import PointVector, DblVector, DblVectorVector
-from ...core import Parameters
-from ...core import CoverageSystem
+from ...core import CoverageSystem, DblVector, DblVectorVector, Parameters, PointVector
+
## @ingroup python_api
class CoverageEnvUtils:
"""
Class for utility functions for coverage environment
"""
+
@staticmethod
def to_tensor(data: object) -> torch.Tensor:
"""
@@ -63,25 +66,34 @@ def to_tensor(data: object) -> torch.Tensor:
ValueError: if data type is not supported
"""
+
if isinstance(data, numpy.ndarray):
return torch.from_numpy(numpy.copy(data.astype(numpy.float32)))
- elif isinstance(data, PointVector):
+
+ if isinstance(data, PointVector):
data_tensor = torch.Tensor(len(data), 2)
- for i in range(len(data)):
+
+ for i, _ in enumerate(data):
data_tensor[i] = CoverageEnvUtils.to_tensor(data[i])
+
return data_tensor
- elif isinstance(data, DblVectorVector):
+
+ if isinstance(data, DblVectorVector):
data_tensor = torch.Tensor(len(data))
- for i in range(len(data)):
+
+ for i, _ in enumerate(data):
data_tensor[i] = CoverageEnvUtils.to_tensor(data[i])
+
return data_tensor
- elif isinstance(data, DblVector):
+
+ if isinstance(data, DblVector):
data_tensor = torch.Tensor(len(data))
- for i in range(len(data)):
+
+ for i, _ in enumerate(data):
data_tensor[i] = float(data[i])
+
return data_tensor
- else:
- raise ValueError('Unknown data type: {}'.format(type(data)))
+ raise ValueError(f"Unknown data type: {type(data)}")
@staticmethod
def get_raw_local_maps(env: CoverageSystem, params: Parameters) -> torch.Tensor:
@@ -96,9 +108,13 @@ def get_raw_local_maps(env: CoverageSystem, params: Parameters) -> torch.Tensor:
torch.Tensor: raw local maps
"""
- local_maps = torch.zeros((env.GetNumRobots(), params.pLocalMapSize, params.pLocalMapSize))
+ local_maps = torch.zeros(
+ (env.GetNumRobots(), params.pLocalMapSize, params.pLocalMapSize)
+ )
+
for r_idx in range(env.GetNumRobots()):
local_maps[r_idx] = CoverageEnvUtils.to_tensor(env.GetRobotLocalMap(r_idx))
+
return local_maps
@staticmethod
@@ -114,13 +130,21 @@ def get_raw_obstacle_maps(env: CoverageSystem, params: Parameters) -> torch.Tens
torch.Tensor: raw obstacle maps
"""
- obstacle_maps = torch.zeros((env.GetNumRobots(), params.pLocalMapSize, params.pLocalMapSize))
+ obstacle_maps = torch.zeros(
+ (env.GetNumRobots(), params.pLocalMapSize, params.pLocalMapSize)
+ )
+
for r_idx in range(env.GetNumRobots()):
- obstacle_maps[r_idx] = CoverageEnvUtils.to_tensor(env.GetRobotObstacleMap(r_idx))
+ obstacle_maps[r_idx] = CoverageEnvUtils.to_tensor(
+ env.GetRobotObstacleMap(r_idx)
+ )
+
return obstacle_maps
@staticmethod
- def get_communication_maps(env: CoverageSystem, params: Parameters, map_size: int) -> torch.Tensor:
+ def get_communication_maps(
+ env: CoverageSystem, params: Parameters, map_size: int
+ ) -> torch.Tensor:
"""
Generate communication maps from positions
@@ -140,9 +164,17 @@ def get_communication_maps(env: CoverageSystem, params: Parameters, map_size: in
num_robots = env.GetNumRobots()
comm_maps = torch.zeros((num_robots, 2, map_size, map_size))
+
for r_idx in range(num_robots):
- neighbors_pos = CoverageEnvUtils.to_tensor(env.GetRelativePositonsNeighbors(r_idx))
- scaled_indices = torch.round(neighbors_pos * map_size / (params.pCommunicationRange * params.pResolution * 2.) + (map_size / 2. - params.pResolution / 2.))
+ neighbors_pos = CoverageEnvUtils.to_tensor(
+ env.GetRelativePositonsNeighbors(r_idx)
+ )
+ scaled_indices = torch.round(
+ neighbors_pos
+ * map_size
+ / (params.pCommunicationRange * params.pResolution * 2.0)
+ + (map_size / 2.0 - params.pResolution / 2.0)
+ )
# comm_range_mask = relative_dist[r_idx] < params.pCommunicationRange
# scaled_indices = scaled_relative_pos[r_idx][comm_range_mask]
indices = torch.transpose(scaled_indices, 1, 0)
@@ -150,8 +182,13 @@ def get_communication_maps(env: CoverageSystem, params: Parameters, map_size: in
values = neighbors_pos / params.pCommunicationRange
# values = values / params.pCommunicationRange
# values = (values + params.pCommunicationRange) / (2. * params.pCommunicationRange)
- comm_maps[r_idx][0] = torch.sparse_coo_tensor(indices, values[:, 0], torch.Size([map_size, map_size])).to_dense()
- comm_maps[r_idx][1] = torch.sparse_coo_tensor(indices, values[:, 1], torch.Size([map_size, map_size])).to_dense()
+ comm_maps[r_idx][0] = torch.sparse_coo_tensor(
+ indices, values[:, 0], torch.Size([map_size, map_size])
+ ).to_dense()
+ comm_maps[r_idx][1] = torch.sparse_coo_tensor(
+ indices, values[:, 1], torch.Size([map_size, map_size])
+ ).to_dense()
+
return comm_maps
# positions = env.GetRobotPositions()
# robot_positions = CoverageEnvUtils.to_tensor(env.GetRobotPositions())
@@ -178,12 +215,23 @@ def resize_maps(maps: torch.Tensor, resized_map_size: int) -> torch.Tensor:
"""
shape = maps.shape
maps = maps.view(-1, maps.shape[-2], maps.shape[-1])
- maps = torchvision.transforms.functional.resize(maps, (resized_map_size, resized_map_size), interpolation=torchvision.transforms.InterpolationMode.BILINEAR, antialias=True)
+ maps = torchvision.transforms.functional.resize(
+ maps,
+ (resized_map_size, resized_map_size),
+ interpolation=torchvision.transforms.InterpolationMode.BILINEAR,
+ antialias=True,
+ )
maps = maps.view(shape[:-2] + maps.shape[-2:])
+
return maps
@staticmethod
- def get_maps(env: CoverageSystem, params: Parameters, resized_map_size: int, use_comm_map: bool) -> torch.Tensor:
+ def get_maps(
+ env: CoverageSystem,
+ params: Parameters,
+ resized_map_size: int,
+ use_comm_map: bool,
+ ) -> torch.Tensor:
"""
Get maps for the coverage environment
@@ -200,14 +248,31 @@ def get_maps(env: CoverageSystem, params: Parameters, resized_map_size: int, use
num_robots = env.GetNumRobots()
raw_local_maps = CoverageEnvUtils.get_raw_local_maps(env, params)
- resized_local_maps = CoverageEnvUtils.resize_maps(raw_local_maps, resized_map_size)
+ resized_local_maps = CoverageEnvUtils.resize_maps(
+ raw_local_maps, resized_map_size
+ )
raw_obstacle_maps = CoverageEnvUtils.get_raw_obstacle_maps(env, params)
- resized_obstacle_maps = CoverageEnvUtils.resize_maps(raw_obstacle_maps, resized_map_size)
+ resized_obstacle_maps = CoverageEnvUtils.resize_maps(
+ raw_obstacle_maps, resized_map_size
+ )
+
if use_comm_map:
- comm_maps = CoverageEnvUtils.get_communication_maps(env, params, resized_map_size)
- maps = torch.cat([resized_local_maps.unsqueeze(1), comm_maps, resized_obstacle_maps.unsqueeze(1)], 1)
+ comm_maps = CoverageEnvUtils.get_communication_maps(
+ env, params, resized_map_size
+ )
+ maps = torch.cat(
+ [
+ resized_local_maps.unsqueeze(1),
+ comm_maps,
+ resized_obstacle_maps.unsqueeze(1),
+ ],
+ 1,
+ )
else:
- maps = torch.cat([resized_local_maps.unsqueeze(1), resized_obstacle_maps.unsqueeze(1)], 1)
+ maps = torch.cat(
+ [resized_local_maps.unsqueeze(1), resized_obstacle_maps.unsqueeze(1)], 1
+ )
+
return maps
@staticmethod
@@ -223,8 +288,10 @@ def get_voronoi_features(env: CoverageSystem) -> torch.Tensor:
"""
features = env.GetRobotVoronoiFeatures()
tensor_features = torch.zeros((len(features), len(features[0])))
- for r_idx in range(len(features)):
+
+ for r_idx, _ in enumerate(features):
tensor_features[r_idx] = CoverageEnvUtils.to_tensor(features[r_idx])
+
return tensor_features
@staticmethod
@@ -234,38 +301,73 @@ def get_robot_positions(env: CoverageSystem) -> torch.Tensor:
Args:
env: coverage environment
-
+
Returns:
torch.Tensor: robot positions
"""
robot_positions = CoverageEnvUtils.to_tensor(env.GetRobotPositions())
+
return robot_positions
@staticmethod
def get_weights(env: CoverageSystem, params: Parameters) -> torch.Tensor:
- onebyexp = 1. / math.exp(1.)
+ """
+ Get edge weights for the communication graph
+
+ Args:
+ env: coverage environment
+ params: parameters
+
+ Returns:
+ torch.Tensor: edge weights
+ """
+ onebyexp = 1.0 / math.exp(1.0)
robot_positions = CoverageEnvUtils.to_tensor(env.GetRobotPositions())
pairwise_distances = torch.cdist(robot_positions, robot_positions, 2)
- edge_weights = torch.exp(-(pairwise_distances.square())/(params.pCommunicationRange * params.pCommunicationRange))
+ edge_weights = torch.exp(
+ -(pairwise_distances.square())
+ / (params.pCommunicationRange * params.pCommunicationRange)
+ )
edge_weights.masked_fill_(edge_weights < onebyexp, 0)
edge_weights.fill_diagonal_(0)
+
return edge_weights
# Legacy edge weights used in previous research
# The weights are proportional to the distance
# Trying to move away from this
@staticmethod
- def robot_positions_to_edge_weights(robot_positions: PointVector, world_map_size: int, comm_range: float) -> torch.Tensor:
+ def robot_positions_to_edge_weights(
+ robot_positions: PointVector, world_map_size: int, comm_range: float
+ ) -> torch.Tensor:
+ """
+ Convert robot positions to edge weights
+
+ Args:
+ robot_positions: robot positions
+ world_map_size: size of the world map
+ comm_range: communication range
+
+ Returns:
+ torch.Tensor: edge weights
+ """
x = numpy.array(robot_positions)
- S = distance_matrix(x, x)
- S[S > comm_range] = 0
- C = (world_map_size**2) / (S.shape[0]**2)
- C = 3 / C
- graph_obs = C * S
+ s_mat = distance_matrix(x, x)
+ s_mat[s_mat > comm_range] = 0
+ c_mat = (world_map_size**2) / (s_mat.shape[0] ** 2)
+ c_mat = 3 / c_mat
+ graph_obs = c_mat * s_mat
+
return graph_obs
@staticmethod
- def get_torch_geometric_data(env: CoverageSystem, params: Parameters, use_cnn: bool, use_comm_map: bool, map_size: int) -> torch_geometric.data.Data:
+ def get_torch_geometric_data(
+ env: CoverageSystem,
+ params: Parameters,
+ use_cnn: bool,
+ use_comm_map: bool,
+ map_size: int,
+ ) -> torch_geometric.data.Data:
"""
Get torch geometric data
In this function, the edge weights are binary
@@ -281,6 +383,7 @@ def get_torch_geometric_data(env: CoverageSystem, params: Parameters, use_cnn: b
torch_geometric.data.Data: torch geometric data
"""
+
if use_cnn:
features = CoverageEnvUtils.get_maps(env, params, map_size, use_comm_map)
else:
@@ -289,13 +392,13 @@ def get_torch_geometric_data(env: CoverageSystem, params: Parameters, use_cnn: b
edge_index = edge_weights.indices().long()
weights = edge_weights.values().float()
pos = CoverageEnvUtils.get_robot_positions(env)
- pos = (pos + params.pWorldMapSize/2.0)/params.pWorldMapSize
+ pos = (pos + params.pWorldMapSize / 2.0) / params.pWorldMapSize
data = torch_geometric.data.Data(
- x=features,
- edge_index=edge_index.clone().detach(),
- edge_weight=weights.clone().detach(),
- pos=pos.clone().detach()
- )
+ x=features,
+ edge_index=edge_index.clone().detach(),
+ edge_weight=weights.clone().detach(),
+ pos=pos.clone().detach(),
+ )
return data
diff --git a/python/coverage_control/nn/data_loaders/data_loader_utils.py b/python/coverage_control/nn/data_loaders/data_loader_utils.py
index 50f2282f..e7e93637 100644
--- a/python/coverage_control/nn/data_loaders/data_loader_utils.py
+++ b/python/coverage_control/nn/data_loaders/data_loader_utils.py
@@ -19,24 +19,19 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-import os
-import sys
-if sys.version_info[1] < 11:
- import tomli as tomllib
-else:
- import tomllib
-import yaml
import torch
import torch_geometric
from coverage_control import IOUtils
__all__ = ["DataLoaderUtils"]
+
## @ingroup python_api
class DataLoaderUtils:
"""
Class to provide utility functions to load tensors and configuration files
"""
+
@staticmethod
def load_maps(path: str, use_comm_map: bool = False) -> torch.tensor:
"""
@@ -67,10 +62,13 @@ def load_maps(path: str, use_comm_map: bool = False) -> torch.tensor:
maps = torch.cat([local_maps, comm_maps, obstacle_maps], 2)
else:
maps = torch.cat([local_maps, obstacle_maps], 2)
+
return maps
@staticmethod
- def load_features(path: str, output_dim: int = None) -> tuple[torch.tensor, torch.tensor, torch.tensor]:
+ def load_features(
+ path: str, output_dim: int = None
+ ) -> tuple[torch.tensor, torch.tensor, torch.tensor]:
"""
Function to load normalized features
@@ -88,12 +86,26 @@ def load_features(path: str, output_dim: int = None) -> tuple[torch.tensor, torc
features_mean: Mean of the features
features_std: Standard deviation of the features
"""
- normalized_coverage_features = IOUtils.load_tensor(f"{path}/normalized_coverage_features.pt")
- coverage_features_mean = IOUtils.load_tensor(f"{path}/../coverage_features_mean.pt")
- coverage_features_std = IOUtils.load_tensor(f"{path}/../coverage_features_std.pt")
+ normalized_coverage_features = IOUtils.load_tensor(
+ f"{path}/normalized_coverage_features.pt"
+ )
+ coverage_features_mean = IOUtils.load_tensor(
+ f"{path}/../coverage_features_mean.pt"
+ )
+ coverage_features_std = IOUtils.load_tensor(
+ f"{path}/../coverage_features_std.pt"
+ )
+
if output_dim is not None:
- normalized_coverage_features = normalized_coverage_features[:, :, :output_dim]
- return normalized_coverage_features, coverage_features_mean, coverage_features_std
+ normalized_coverage_features = normalized_coverage_features[
+ :, :, :output_dim
+ ]
+
+ return (
+ normalized_coverage_features,
+ coverage_features_mean,
+ coverage_features_std,
+ )
@staticmethod
def load_actions(path: str) -> tuple[torch.tensor, torch.tensor, torch.tensor]:
@@ -117,12 +129,11 @@ def load_actions(path: str) -> tuple[torch.tensor, torch.tensor, torch.tensor]:
actions = IOUtils.load_tensor(f"{path}/normalized_actions.pt")
actions_mean = IOUtils.load_tensor(f"{path}/../actions_mean.pt")
actions_std = IOUtils.load_tensor(f"{path}/../actions_std.pt")
+
return actions, actions_mean, actions_std
@staticmethod
def load_robot_positions(path: str) -> torch.tensor:
- robot_positions = IOUtils.load_tensor(f"{path}/robot_positions.pt")
- return robot_positions
"""
Function to load robot positions
@@ -136,6 +147,9 @@ def load_robot_positions(path: str) -> torch.tensor:
robot_positions: The loaded robot positions
"""
+ robot_positions = IOUtils.load_tensor(f"{path}/robot_positions.pt")
+
+ return robot_positions
@staticmethod
def load_edge_weights(path: str) -> torch.tensor:
@@ -154,10 +168,13 @@ def load_edge_weights(path: str) -> torch.tensor:
"""
edge_weights = IOUtils.load_tensor(f"{path}/edge_weights.pt")
edge_weights.to_dense()
+
return edge_weights
@staticmethod
- def to_torch_geometric_data(feature: torch.tensor, edge_weights: torch.tensor, pos: torch.tensor = None) -> torch_geometric.data.Data:
+ def to_torch_geometric_data(
+ feature: torch.tensor, edge_weights: torch.tensor, pos: torch.tensor = None
+ ) -> torch_geometric.data.Data:
"""
The function converts the feature, edge_weights and pos to a torch_geometric.data.Data object
This is essential for using the data with the PyTorch Geometric library
@@ -183,17 +200,19 @@ def to_torch_geometric_data(feature: torch.tensor, edge_weights: torch.tensor, p
edge_index = edge_weights.indices().long()
weights = edge_weights.values().float()
# weights = torch.reciprocal(edge_weights.values().float())
- if pos == None:
+
+ if pos is None:
data = torch_geometric.data.Data(
- x=feature,
- edge_index=edge_index.clone().detach(),
- edge_weight=weights.clone().detach()
- )
+ x=feature,
+ edge_index=edge_index.clone().detach(),
+ edge_weight=weights.clone().detach(),
+ )
else:
data = torch_geometric.data.Data(
- x=feature,
- edge_index=edge_index.clone().detach(),
- edge_weight=weights.clone().detach(),
- pos=pos.clone().detach()
- )
+ x=feature,
+ edge_index=edge_index.clone().detach(),
+ edge_weight=weights.clone().detach(),
+ pos=pos.clone().detach(),
+ )
+
return data
diff --git a/python/coverage_control/nn/data_loaders/loaders.py b/python/coverage_control/nn/data_loaders/loaders.py
index 5966f9d0..e590f88f 100644
--- a/python/coverage_control/nn/data_loaders/loaders.py
+++ b/python/coverage_control/nn/data_loaders/loaders.py
@@ -19,20 +19,33 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-import os
+"""
+Module for loading datasets
+"""
+
import torch
+from coverage_control import IOUtils
from torch_geometric.data import Dataset
-from .data_loader_utils import DataLoaderUtils
from .coverage_env_utils import CoverageEnvUtils
+from .data_loader_utils import DataLoaderUtils
+
## @ingroup python_api
class LocalMapCNNDataset(Dataset):
"""
Dataset for CNN training
"""
- def __init__(self, data_dir: str, stage: str, use_comm_map: bool, output_dim: int, preload: bool = True):
- super(LocalMapCNNDataset, self).__init__(None, None, None, None)
+
+ def __init__(
+ self,
+ data_dir: str,
+ stage: str,
+ use_comm_map: bool,
+ output_dim: int,
+ preload: bool = True,
+ ):
+ super().__init__(None, None, None, None)
"""
Constructor for the LocalMapCNNDataset class
Args:
@@ -47,7 +60,8 @@ def __init__(self, data_dir: str, stage: str, use_comm_map: bool, output_dim: in
self.data_dir = data_dir
self.output_dim = output_dim
self.use_comm_map = use_comm_map
- if preload == True:
+
+ if preload is True:
self.load_data()
def len(self):
@@ -56,11 +70,17 @@ def len(self):
def get(self, idx):
maps = self.maps[idx]
target = self.targets[idx]
+
return maps, target
def load_data(self):
+ """
+ Load the data from the data directory
+ """
# maps has shape (num_samples, num_robots, nuimage_size, image_size)
- self.maps = DataLoaderUtils.load_maps(f"{self.data_dir}/{self.stage}", self.use_comm_map)
+ self.maps = DataLoaderUtils.load_maps(
+ f"{self.data_dir}/{self.stage}", self.use_comm_map
+ )
num_channels = self.maps.shape[2]
image_size = self.maps.shape[3]
@@ -68,15 +88,19 @@ def load_data(self):
self.dataset_size = self.maps.shape[0]
# self.targets, self.targets_mean, self.targets_std = DataLoaderUtils.load_features(f"{self.data_dir}/{self.stage}", self.output_dim)
- self.targets, self.targets_mean, self.targets_std = DataLoaderUtils.load_actions(f"{self.data_dir}/{self.stage}")
+ self.targets, self.targets_mean, self.targets_std = (
+ DataLoaderUtils.load_actions(f"{self.data_dir}/{self.stage}")
+ )
self.targets = self.targets.view(-1, self.targets.shape[2])
+
class LocalMapGNNDataset(Dataset):
"""
Deprecated
"""
+
def __init__(self, data_dir, stage):
- super(LocalMapGNNDataset, self).__init__(None, None, None, None)
+ super().__init__(None, None, None, None)
self.stage = stage
@@ -84,22 +108,33 @@ def __init__(self, data_dir, stage):
self.coverage_maps = IOUtils.load_tensor(f"{data_dir}/{stage}/coverage_maps.pt")
self.num_robots = self.coverage_maps.shape[1]
self.dataset_size = self.coverage_maps.shape[0]
- self.targets, self.targets_mean, self.targets_std = DataLoaderUtils.load_actions(f"{data_dir}/{stage}")
- self.robot_positions = DataLoaderUtils.load_robot_positions(f"{data_dir}/{stage}")
-
- h_vals = torch.linspace(1.0, -1.0, self.coverage_maps.shape[-2]+1)
- h_vals = (h_vals[1:] + h_vals[:-1])/2
- w_vals = torch.linspace(-1.0, 1.0, self.coverage_maps.shape[-1]+1)
- w_vals = (w_vals[1:] + w_vals[:-1])/2
- self.heatmap_x = torch.stack([h_vals]*self.coverage_maps.shape[-1], axis=1)/100
- self.heatmap_y = torch.stack([w_vals]*self.coverage_maps.shape[-2], axis=0)/100
-
+ self.targets, self.targets_mean, self.targets_std = (
+ DataLoaderUtils.load_actions(f"{data_dir}/{stage}")
+ )
+ self.robot_positions = DataLoaderUtils.load_robot_positions(
+ f"{data_dir}/{stage}"
+ )
+
+ h_vals = torch.linspace(1.0, -1.0, self.coverage_maps.shape[-2] + 1)
+ h_vals = (h_vals[1:] + h_vals[:-1]) / 2
+ w_vals = torch.linspace(-1.0, 1.0, self.coverage_maps.shape[-1] + 1)
+ w_vals = (w_vals[1:] + w_vals[:-1]) / 2
+ self.heatmap_x = (
+ torch.stack([h_vals] * self.coverage_maps.shape[-1], axis=1) / 100
+ )
+ self.heatmap_y = (
+ torch.stack([w_vals] * self.coverage_maps.shape[-2], axis=0) / 100
+ )
# Print the details of the dataset with device information
print(f"Dataset: {self.stage} | Size: {self.dataset_size}")
- print(f"Coverage Maps: {self.coverage_maps.shape} | Device: {self.coverage_maps.device}")
+ print(
+ f"Coverage Maps: {self.coverage_maps.shape} | Device: {self.coverage_maps.device}"
+ )
print(f"Targets: {self.targets.shape} | Device: {self.targets.device}")
- print(f"Robot Positions: {self.robot_positions.shape} | Device: {self.robot_positions.device}")
+ print(
+ f"Robot Positions: {self.robot_positions.shape} | Device: {self.robot_positions.device}"
+ )
print(f"Heatmap X: {self.heatmap_x.shape} | Device: {self.heatmap_x.device}")
print(f"Heatmap Y: {self.heatmap_y.shape} | Device: {self.heatmap_y.device}")
@@ -114,73 +149,97 @@ def get(self, idx):
# heatmaps are of shape image_size x image_size
heatmap_x = torch.stack([self.heatmap_x] * coverage_maps.shape[0])
heatmap_y = torch.stack([self.heatmap_y] * coverage_maps.shape[0])
- maps = torch.stack([coverage_maps[:,0], coverage_maps[:,1], heatmap_x, heatmap_y], dim=1)
+ maps = torch.stack(
+ [coverage_maps[:, 0], coverage_maps[:, 1], heatmap_x, heatmap_y], dim=1
+ )
# maps = maps.view(self.num_robots, 4, maps.shape[-2], maps.shape[-1])
- edge_weights = CoverageEnvUtils.RobotPositionsToEdgeWeights(self.robot_positions[idx], 2048, 256)
+ edge_weights = CoverageEnvUtils.RobotPositionsToEdgeWeights(
+ self.robot_positions[idx], 2048, 256
+ )
data = DataLoaderUtils.to_torch_geometric_data(maps, edge_weights)
targets = self.targets[idx]
+
return data, targets
+
## @ingroup python_api
class CNNGNNDataset(Dataset):
"""
Dataset for hybrid CNN-GNN training
"""
+
def __init__(self, data_dir, stage, use_comm_map, world_size):
- super(CNNGNNDataset, self).__init__(None, None, None, None)
+ super().__init__(None, None, None, None)
self.stage = stage
self.maps = DataLoaderUtils.load_maps(f"{data_dir}/{stage}", use_comm_map)
self.dataset_size = self.maps.shape[0]
- self.targets, self.targets_mean, self.targets_std = DataLoaderUtils.load_actions(f"{data_dir}/{stage}")
+ self.targets, self.targets_mean, self.targets_std = (
+ DataLoaderUtils.load_actions(f"{data_dir}/{stage}")
+ )
self.edge_weights = DataLoaderUtils.load_edge_weights(f"{data_dir}/{stage}")
- self.robot_positions = DataLoaderUtils.load_robot_positions(f"{data_dir}/{stage}")
- self.robot_positions = (self.robot_positions + world_size/2)/world_size
+ self.robot_positions = DataLoaderUtils.load_robot_positions(
+ f"{data_dir}/{stage}"
+ )
+ self.robot_positions = (self.robot_positions + world_size / 2) / world_size
# Print the details of the dataset with device information
print(f"Dataset: {self.stage} | Size: {self.dataset_size}")
print(f"Maps: {self.maps.shape} | Device: {self.maps.device}")
print(f"Targets: {self.targets.shape} | Device: {self.targets.device}")
- print(f"Edge Weights: {self.edge_weights.shape} | Device: {self.edge_weights.device}")
+ print(
+ f"Edge Weights: {self.edge_weights.shape} | Device: {self.edge_weights.device}"
+ )
print(f"Targets: {self.targets.shape} | Device: {self.targets.device}")
- print(f"Robot Positions: {self.robot_positions.shape} | Device: {self.robot_positions.device}")
-
+ print(
+ f"Robot Positions: {self.robot_positions.shape} | Device: {self.robot_positions.device}"
+ )
def len(self):
return self.dataset_size
def get(self, idx):
- data = DataLoaderUtils.to_torch_geometric_data(self.maps[idx], self.edge_weights[idx], self.robot_positions[idx])
+ data = DataLoaderUtils.to_torch_geometric_data(
+ self.maps[idx], self.edge_weights[idx], self.robot_positions[idx]
+ )
# data = CoverageEnvUtils.GetTorchGeometricDataRobotPositions(self.maps[idx], self.robot_positions[idx])
targets = self.targets[idx]
+
if targets.dim == 3:
targets = targets.view(-1, targets.shape[-1])
+
return data, targets
+
## @ingroup python_api
class VoronoiGNNDataset(Dataset):
"""
Dataset for non-hybrid GNN training
"""
+
def __init__(self, data_dir, stage, output_dim):
- super(VoronoiGNNDataset, self).__init__(None, None, None, None)
+ super().__init__(None, None, None, None)
self.stage = stage
self.output_dim = output_dim
self.features = DataLoaderUtils.load_features(f"{data_dir}/{stage}", output_dim)
- self.dataset_size = self.features.shape[0]
- self.targets, self.targets_mean, self.targets_std = DataLoaderUtils.load_actions(f"{data_dir}/{stage}")
+ self.dataset_size = self.features[0].shape[0]
+ self.targets, self.targets_mean, self.targets_std = (
+ DataLoaderUtils.load_actions(f"{data_dir}/{stage}")
+ )
self.edge_weights = DataLoaderUtils.load_edge_weights(f"{data_dir}/{stage}")
-
def len(self):
return self.dataset_size
def get(self, idx):
- data = DataLoaderUtils.to_torch_geometric_data(self.features[idx], self.edge_weights[idx], self.targets[idx])
+ data = DataLoaderUtils.to_torch_geometric_data(
+ self.features[idx], self.edge_weights[idx], self.targets[idx]
+ )
+
return data, data.y
diff --git a/python/coverage_control/nn/models/__init__.py b/python/coverage_control/nn/models/__init__.py
index a0d00371..66c3d732 100644
--- a/python/coverage_control/nn/models/__init__.py
+++ b/python/coverage_control/nn/models/__init__.py
@@ -1,8 +1,10 @@
-from __future__ import annotations
+"""
+This module contains the implementation of the LPAC architecture.
+"""
-__name__ = "models"
+from __future__ import annotations
from .cnn import CNN
from .lpac import LPAC
-__all__ = ["__name__", "CNN", "LPAC"]
+__all__ = ["CNN", "LPAC"]
diff --git a/python/coverage_control/nn/models/cnn.py b/python/coverage_control/nn/models/cnn.py
index 915a319f..e634fecc 100644
--- a/python/coverage_control/nn/models/cnn.py
+++ b/python/coverage_control/nn/models/cnn.py
@@ -19,6 +19,9 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+"""
+Implements an architecture consisting of a multi-layer CNN followed by an MLP, according to parameters specified in the input config
+"""
import torch
from torch_geometric.nn import MLP
@@ -27,28 +30,53 @@
__all__ = ["CNN"]
+
+## @ingroup python_api
class CNN(torch.nn.Module, CNNConfigParser):
"""
- Implements an architecture consisting of a multi-layer CNN followed by an MLP, according to parameters specified in the input config
- This is the current architecture used in the hybrid CNN-GNN
+ Implements an architecture consisting of a multi-layer CNN followed by an MLP, according to parameters specified in the input config.
"""
+
def __init__(self, config: dict):
- super(CNN, self).__init__()
- self.Parse(config)
+ super().__init__()
+ self.parse(config)
self.cnn_backbone = CNNBackBone(self.config)
- self.mlp = MLP([self.latent_size, 2 * self.latent_size, 2 * self.latent_size, self.latent_size])
+ self.mlp = MLP(
+ [
+ self.latent_size,
+ 2 * self.latent_size,
+ 2 * self.latent_size,
+ self.latent_size,
+ ]
+ )
self.linear = torch.nn.Linear(self.latent_size, self.output_dim)
- def forward(self, x: torch.Tensor, return_embedding=False) -> torch.Tensor:
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Forward pass through the network
+
+ Args:
+ x: Input tensor
+
+ Returns:
+ Output tensor
+ """
x = self.cnn_backbone(x)
x = self.mlp(x)
x = self.linear(x)
+
return x
def load_cpp_model(self, model_path: str) -> None:
+ """
+ Loads a model saved in cpp jit format
+ """
jit_model = torch.jit.load(model_path)
self.load_state_dict(jit_model.state_dict(), strict=False)
def load_model(self, model_path: str) -> None:
+ """
+ Loads a model saved in pytorch format
+ """
self.load_state_dict(torch.load(model_path), strict=False)
diff --git a/python/coverage_control/nn/models/cnn_backbone.py b/python/coverage_control/nn/models/cnn_backbone.py
index 7eabe2b0..08b14588 100644
--- a/python/coverage_control/nn/models/cnn_backbone.py
+++ b/python/coverage_control/nn/models/cnn_backbone.py
@@ -19,42 +19,73 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+"""
+Implements a multi-layer convolutional neural network
+"""
import torch
-import math
from .config_parser import CNNConfigParser
+## @ingroup python_api
class CNNBackBone(torch.nn.Module, CNNConfigParser):
"""
- Implements a multi-layer convolutional neural network, with ReLU non-linearities between layers,
+ Implements a multi-layer convolutional neural network,
+ with leaky-ReLU non-linearities between layers,
according to hyperparameters specified in the config
"""
+
def __init__(self, config: dict):
- super(CNNBackBone, self).__init__()
- self.Parse(config)
+ super().__init__()
+ self.parse(config)
- self.add_module("conv0", torch.nn.Conv2d(self.input_dim, self.latent_size, kernel_size=self.kernel_size))
+ self.add_module(
+ "conv0",
+ torch.nn.Conv2d(
+ self.input_dim, self.latent_size, kernel_size=self.kernel_size
+ ),
+ )
self.add_module("batch_norm0", torch.nn.BatchNorm2d(self.latent_size))
+
for layer in range(self.num_layers - 1):
- self.add_module("conv{}".format(layer + 1), torch.nn.Conv2d(self.latent_size, self.latent_size, kernel_size=self.kernel_size))
- self.add_module("batch_norm{}".format(layer + 1), torch.nn.BatchNorm2d(self.latent_size))
+ self.add_module(
+ f"conv{layer + 1}",
+ torch.nn.Conv2d(
+ self.latent_size, self.latent_size, kernel_size=self.kernel_size
+ ),
+ )
+ self.add_module(
+ f"batch_norm{layer + 1}", torch.nn.BatchNorm2d(self.latent_size)
+ )
- self.flatten_size = self.latent_size * (self.image_size - self.num_layers * (self.kernel_size - 1)) ** 2
+ self.flatten_size = (
+ self.latent_size
+ * (self.image_size - self.num_layers * (self.kernel_size - 1)) ** 2
+ )
- self.add_module("linear_1", torch.nn.Linear(self.flatten_size, self.latent_size))
+ self.add_module(
+ "linear_1", torch.nn.Linear(self.flatten_size, self.latent_size)
+ )
# self.add_module("linear_2", torch.nn.Linear(self.latent_size, self.backbone_output_dim))
# self.add_module("linear_3", torch.nn.Linear(2 * self.output_dim, self.output_dim))
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Forward pass through the network
+
+ Args:
+ x: input tensor
+ """
for layer in range(self.num_layers):
- x = torch.nn.functional.leaky_relu(self._modules["batch_norm{}".format(layer)](self._modules["conv{}".format(layer)](x)))
+ x = torch.nn.functional.leaky_relu(
+ self._modules[f"batch_norm{layer}"](self._modules[f"conv{layer}"](x))
+ )
# x = self._modules["conv{}".format(layer)](x)
# x = self._modules["batch_norm{}".format(layer)](x)
# x = torch.nn.functional.leaky_relu(x)
x = x.flatten(1)
x = torch.nn.functional.leaky_relu(self.linear_1(x))
+
return x
# x = torch.nn.functional.leaky_relu(self.linear_2(x))
# x = self.linear_3(x)
diff --git a/python/coverage_control/nn/models/config_parser.py b/python/coverage_control/nn/models/config_parser.py
index 2ac77b37..a69a7b5c 100644
--- a/python/coverage_control/nn/models/config_parser.py
+++ b/python/coverage_control/nn/models/config_parser.py
@@ -19,27 +19,61 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-class CNNConfigParser():
+"""
+This file contains the configuration parser for the models
+"""
+
+
+class CNNConfigParser:
+ """
+ Class to parse the configuration for the CNN model
+ """
+
def __init__(self):
- pass
+ self.config = None
+ self.input_dim = None
+ self.output_dim = None
+ self.num_layers = None
+ self.latent_size = None
+ self.kernel_size = None
+ self.image_size = None
+
+ def parse(self, config: dict) -> None:
+ """
+ Parse the configuration for the CNN model
- def Parse(self, config: dict) -> None:
+ Args:
+ config (dict): Configuration for the CNN model
+ """
self.config = config
- self.input_dim = self.config['InputDim']
- self.output_dim = self.config['OutputDim']
- self.num_layers = self.config['NumLayers']
- self.latent_size = self.config['LatentSize']
- self.kernel_size = self.config['KernelSize']
- self.image_size = self.config['ImageSize']
+ self.input_dim = self.config["InputDim"]
+ self.output_dim = self.config["OutputDim"]
+ self.num_layers = self.config["NumLayers"]
+ self.latent_size = self.config["LatentSize"]
+ self.kernel_size = self.config["KernelSize"]
+ self.image_size = self.config["ImageSize"]
+
-class GNNConfigParser():
- def __init__(self, config):
- pass
+class GNNConfigParser:
+ """
+ Class to parse the configuration for the GNN model
+ """
+
+ def __init__(self):
+ self.config = None
+ self.input_dim = None
+ self.output_dim = None
+ self.num_hops = None
+ self.num_layers = None
+ self.latent_size = None
def parse(self, config: dict) -> None:
+ """
+ Parse the configuration for the GNN model
+ """
self.config = config
- self.input_dim = self.config['InputDim']
- self.output_dim = self.config['OutputDim']
- self.num_hops = self.config['NumHops']
- self.num_layers = self.config['NumLayers']
- self.latent_size = self.config['LatentSize']
+ self.input_dim = self.config["InputDim"]
+ self.output_dim = self.config["OutputDim"]
+ self.num_hops = self.config["NumHops"]
+ self.num_layers = self.config["NumLayers"]
+ self.latent_size = self.config["LatentSize"]
diff --git a/python/coverage_control/nn/models/gnn.py b/python/coverage_control/nn/models/gnn.py
deleted file mode 100644
index cb5a3f66..00000000
--- a/python/coverage_control/nn/models/gnn.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# This file is part of the CoverageControl library
-#
-# Author: Saurav Agarwal
-# Contact: sauravag@seas.upenn.edu, agr.saurav1@gmail.com
-# Repository: https://github.com/KumarRobotics/CoverageControl
-#
-# Copyright (c) 2024, Saurav Agarwal
-#
-# The CoverageControl library is free software: you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# The CoverageControl library is distributed in the hope that it will be
-# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
-# Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# CoverageControl library. If not, see .
-
-import torch
-import torch_geometric
-from torch_geometric.nn import MLP
-
-from .config_parser import GNNConfigParser
-from .cnn_backbone import CNNBackBone
-from .gnn_backbone import GNNBackBone
-
-__all__ = ["LPAC"]
-
-class LPAC(torch.nn.Module, GNNConfigParser):
- def __init__(self, config):
- super(LPAC, self).__init__()
- self.cnn_config = config['CNN']
- self.parse(config['GNN'])
- self.cnn_backbone = CNNBackBone(self.cnn_config)
- self.gnn_backbone = GNNBackBone(self.config, self.cnn_backbone.latent_size + 2)
- # --- no pos ---
- # self.gnn_backbone = GNNBackBone(self.config, self.cnn_backbone.latent_size)
- # --- no pos ---
- self.gnn_mlp = MLP([self.latent_size, 32, 32])
- self.output_linear = torch.nn.Linear(32, self.output_dim)
- # Register buffers to model
- self.register_buffer("actions_mean", torch.zeros(self.output_dim))
- self.register_buffer("actions_std", torch.ones(self.output_dim))
-
- def forward(self, data: torch_geometric.data.Data) -> torch.Tensor:
- x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
- pos = data.pos
- cnn_output = self.cnn_backbone(x.view(-1, x.shape[-3], x.shape[-2], x.shape[-1]))
-
- # --- no pos ---
- # gnn_output = self.gnn_backbone(cnn_output, edge_index)
- # mlp_output = self.gnn_mlp(gnn_output)
- # x = self.output_linear(mlp_output)
- # x = self.output_linear(self.gnn_mlp(self.gnn_backbone(cnn_output, edge_index)))
- # --- no pos ---
-
- gnn_backbone_in = torch.cat([cnn_output, pos], dim=-1)
- # print(gnn_backbone_in)
- # gnn_output = self.gnn_backbone(gnn_backbone_in, edge_index)
- # mid_test = self.gnn_mlp.lins[0](gnn_output)
- # print(f'mid_test sum1: {mid_test.sum()}')
- # mid_test = self.gnn_mlp.norms[0](mid_test)
- # print(f'mid_test sum: {mid_test.sum()}')
- # mlp_output = self.gnn_mlp(self.gnn_backbone(gnn_backbone_in, edge_index)
- # print(f'mlp_output sum: {mlp_output[0]}')
- x = self.output_linear(self.gnn_mlp(self.gnn_backbone(gnn_backbone_in, edge_index)))
- return x
-
- def load_model(self, model_state_dict_path: str) -> None:
- self.load_state_dict(torch.load(model_state_dict_path), strict=False)
-
- def load_cnn_backbone(self, model_path: str) -> None:
- self.load_state_dict(torch.load(model_path).state_dict(), strict=False)
-
- def load_gnn_backbone(self, model_path: str) -> None:
- self.load_state_dict(torch.load(model_path).state_dict(), strict=False)
diff --git a/python/coverage_control/nn/models/gnn_backbone.py b/python/coverage_control/nn/models/gnn_backbone.py
index bb130c54..c5f991ec 100644
--- a/python/coverage_control/nn/models/gnn_backbone.py
+++ b/python/coverage_control/nn/models/gnn_backbone.py
@@ -19,34 +19,53 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-import os
-import sys
+"""
+Implements a GNN architecture.
+"""
import torch
import torch_geometric
from .config_parser import GNNConfigParser
+## @ingroup python_api
class GNNBackBone(torch.nn.Module, GNNConfigParser):
"""
- Implements a multi-layer graph convolutional neural network, with ReLU non-linearities between layers,
+ Implements a GNN architecture,
according to hyperparameters specified in the input config
"""
- def __init__(self, config, input_dim = None):
- super(GNNBackBone, self).__init__()
+
+ def __init__(self, config, input_dim=None):
+ super().__init__()
self.parse(config)
+
if input_dim is not None:
self.input_dim = input_dim
- self.add_module("graph_conv_0", torch_geometric.nn.TAGConv(in_channels = self.input_dim, out_channels = self.latent_size, K = self.num_hops))
- for i in range(1, self.num_layers):
- self.add_module("graph_conv_{}".format(i), torch_geometric.nn.TAGConv(in_channels = self.latent_size, out_channels = self.latent_size, K = self.num_hops))
+ self.add_module(
+ "graph_conv_0",
+ torch_geometric.nn.TAGConv(
+ in_channels=self.input_dim,
+ out_channels=self.latent_size,
+ K=self.num_hops,
+ ),
+ )
+ for i in range(1, self.num_layers):
+ self.add_module(
+ "graph_conv_{}".format(i),
+ torch_geometric.nn.TAGConv(
+ in_channels=self.latent_size,
+ out_channels=self.latent_size,
+ K=self.num_hops,
+ ),
+ )
- def forward(self, x: torch.Tensor, edge_index: torch.Tensor, edge_weight = None) -> torch.Tensor:
+ def forward(
+ self, x: torch.Tensor, edge_index: torch.Tensor, edge_weight=None
+ ) -> torch.Tensor:
for i in range(self.num_layers):
x = self._modules["graph_conv_{}".format(i)](x, edge_index, edge_weight)
x = torch.relu(x)
- return x
-
+ return x
diff --git a/python/coverage_control/nn/models/lpac.py b/python/coverage_control/nn/models/lpac.py
index a3f4597c..49c56c49 100644
--- a/python/coverage_control/nn/models/lpac.py
+++ b/python/coverage_control/nn/models/lpac.py
@@ -29,9 +29,17 @@
__all__ = ["LPAC"]
+"""
+Module for LPAC model
+"""
+
+## @ingroup python_api
class LPAC(torch.nn.Module, GNNConfigParser):
+ """
+ LPAC neural network architecture
+ """
def __init__(self, in_config):
- super(LPAC, self).__init__()
+ super().__init__()
self.cnn_config = in_config["CNNBackBone"]
self.parse(in_config["GNNBackBone"])
self.cnn_backbone = CNNBackBone(self.cnn_config)
@@ -46,6 +54,9 @@ def __init__(self, in_config):
self.register_buffer("actions_std", torch.ones(self.output_dim))
def forward(self, data: torch_geometric.data.Data) -> torch.Tensor:
+ """
+ Forward pass of the LPAC model
+ """
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
pos = data.pos
cnn_output = self.cnn_backbone(x.view(-1, x.shape[-3], x.shape[-2], x.shape[-1]))
@@ -70,10 +81,19 @@ def forward(self, data: torch_geometric.data.Data) -> torch.Tensor:
return x
def load_model(self, model_state_dict_path: str) -> None:
+ """
+ Load the model from the state dict
+ """
self.load_state_dict(torch.load(model_state_dict_path), strict=False)
def load_cnn_backbone(self, model_path: str) -> None:
+ """
+ Load the CNN backbone from the model path
+ """
self.load_state_dict(torch.load(model_path).state_dict(), strict=False)
def load_gnn_backbone(self, model_path: str) -> None:
+ """
+ Load the GNN backbone from the model path
+ """
self.load_state_dict(torch.load(model_path).state_dict(), strict=False)
diff --git a/python/coverage_control/nn/trainers/__init__.py b/python/coverage_control/nn/trainers/__init__.py
index c5b74393..7bd7eb83 100644
--- a/python/coverage_control/nn/trainers/__init__.py
+++ b/python/coverage_control/nn/trainers/__init__.py
@@ -1,7 +1,8 @@
+"""
+Module for training models
+"""
from __future__ import annotations
-__name__ = "trainers"
-
from .trainer import TrainModel
-__all__ = ["__name__", "TrainModel"]
+__all__ = ["TrainModel"]
diff --git a/python/coverage_control/nn/trainers/multi_trainer.py b/python/coverage_control/nn/trainers/multi_trainer.py
deleted file mode 100644
index 96f44da8..00000000
--- a/python/coverage_control/nn/trainers/multi_trainer.py
+++ /dev/null
@@ -1,251 +0,0 @@
-# This file is part of the CoverageControl library
-#
-# Author: Saurav Agarwal
-# Contact: sauravag@seas.upenn.edu, agr.saurav1@gmail.com
-# Repository: https://github.com/KumarRobotics/CoverageControl
-#
-# Copyright (c) 2024, Saurav Agarwal
-#
-# The CoverageControl library is free software: you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# The CoverageControl library is distributed in the hope that it will be
-# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
-# Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# CoverageControl library. If not, see .
-
-import torch
-
-__all__ = ["MultiTrainModel"]
-class MultiTrainModel():
- """
- Train a model using pytorch
-
- Use the class TrainModel in trainer.py to train a single model
- """
-
- def __init__(self, models: torch.nn.Module, train_loader: torch.utils.data.DataLoader, val_loader: torch.utils.data.DataLoader, test_loader: torch.utils.data.DataLoader, optimizers: torch.optim.Optimizer, criterion: torch.nn.Module, epochs: int, devices: list, model_files: list, optimizer_files: list):
- """
- Initialize the model trainer
-
- Args:
- models: list of torch.nn.Module
- train_loader: loader for the training data
- val_loader: loader for the validation data
- test_loader: loader for the test data
- optimizers: list of optimizers for the model
- criterion: loss function
- epochs: number of epochs
- devices: list of devices to train the model
- model_files: list of files to save the model
- optimizer_files: list of files to save the optimizer
- """
- self.models = models
- self.train_loader = train_loader
- self.val_loader = val_loader
- self.test_loader = test_loader
- self.optimizers = optimizers
- self.criterion = criterion
- self.epochs = epochs
- self.devices = devices
- self.model_files = model_files
- self.optimizer_files = optimizer_files
- for i, model in enumerate(self.models):
- device = torch.device('cuda', index = self.devices[i])
- self.models[i] = model.to(device)
-
- def load_saved_model_dict(self, model_path: str) -> None:
- """
- Load the saved model
-
- Args:
- model_path: model path
- """
- self.model.load_state_dict(torch.load(model_path))
-
- def load_saved_model(self, model_path: str) -> None:
- """
- Load the saved model
-
- Args:
- model_path: model path
- """
- self.model = torch.load(model_path)
-
- def load_saved_optimizer(self, optimizer_path: str) -> None:
- """
- Load the saved optimizer
-
- Args:
- optimizer_path: optimizer path
- """
- self.optimizer = torch.load(optimizer_path)
-
- # Train in batches, save the best model using the validation set
- def train(self) -> None:
- """
- Train the model
-
- Args:
- None
- """
- # Initialize the best validation loss
- best_val_losses = [float('inf')] * len(self.models)
-
- # Initialize the loss history
- train_loss_histories = [[] for _ in range(len(self.models))]
- val_loss_histories = [[] for _ in range(len(self.models))]
-
- # Train the model
- for epoch in range(self.epochs):
- # Training
- train_losses = self.TrainEpoch()
- train_loss_histories = [train_loss_history + [train_loss] for train_loss_history, train_loss in zip(train_loss_histories, train_losses)]
-
- # Validation
- val_losses = self.ValidateEpoch()
- val_loss_histories = [val_loss_history + [val_loss] for val_loss_history, val_loss in zip(val_loss_histories, val_losses)]
-
- # Save the best model
- for i, (val_loss, best_val_loss, model, model_file, optimizer, optimizer_file) in enumerate(zip(val_losses, best_val_losses, self.models, self.model_files, self.optimizers, self.optimizer_files)):
- if val_loss < best_val_loss:
- best_val_losses[i] = val_loss
- torch.save(model, model_file)
- torch.save(optimizer, optimizer_file)
-
- # Print the loss
- print("Epoch: {}, Train Loss: {}, Val Loss: {}".format(epoch, train_losses, val_losses))
-
- # Save the loss history
- for i, (train_loss_history, val_loss_history) in enumerate(zip(train_loss_histories, val_loss_histories)):
- model_path = self.model_files[i].split('.')[0]
- torch.save(train_loss_history, model_path + '_train_loss.pt')
- torch.save(val_loss_history, model_path + '_val_loss.pt')
-
- # Train the model in batches
- def train_epoch(self):
- """
- Train the model in batches
- """
- # Initialize the training loss
- train_losses = [0.0] * len(self.models)
-
- # Set the model to training mode
- for model in self.models:
- model.train()
-
- num_dataset = 0
- # Train the model in batches
- for batch_idx, (data, target) in enumerate(self.train_loader):
-
- if target.dim() == 3:
- target = target.view(-1, target.shape[-1])
-
- # Clear the gradients
- for optimizer in self.optimizers:
- optimizer.zero_grad()
-
- # Move the data to the device
- for i in range(len(self.models)):
- data, target = data.to(torch.device('cuda', index=self.devices[i])), target.to(torch.device('cuda', index=self.devices[i]))
-
- # Forward propagation
- output = self.models[i](data)
-
- # Calculate the loss
- loss = self.criterion(output, target)
-
- # Print batch number and loss
- if batch_idx % 100 == 0:
- print("i: {}, Batch: {}, Loss: {}".format(i, batch_idx, loss.item()))
-
- # Backward propagation
- loss.backward()
- self.optimizers[i].step()
-
- # Update the training loss
- train_losses[i] += loss.item() * data.size(0)
- num_dataset += data.size(0)
-
- # Return the training loss
- return train_losses/num_dataset
-
- # Validate the model in batches
- def validate_epoch(self):
- """
- Validate the model in batches
- """
- # Initialize the validation loss
- val_losses = [0.0] * len(self.models)
-
- # Set the model to evaluation mode
- for model in self.models:
- model.eval()
-
- num_dataset = 0
- # Validate the model in batches
- with torch.no_grad():
- for batch_idx, (data, target) in enumerate(self.val_loader):
- if target.dim() == 3:
- target = target.view(-1, target.shape[-1])
-
- # Move the data to the device
- for i in range(self.devices):
- device = torch.device('cuda', index=self.devices[i])
- data, target = data.to(device), target.to(device)
-
- # Forward propagation
- output = self.models[i](data)
-
- # Calculate the loss
- loss = self.criterion(output, target)
-
- # Update the validation loss
- val_losses[i] += loss.item() * data.size(0)
- num_dataset += data.size(0)
-
- # Return the validation loss
- return val_losses/num_dataset
-
- # Test the model in batches
- def test(self):
- """
- Test the model in batches
- :return: test loss
- """
- # Initialize the test loss
- test_losses = [0.0] * len(self.models)
-
- # Set the model to evaluation mode
- for model in self.models:
- model.eval()
-
- num_dataset = 0
- # Test the model in batches
- with torch.no_grad():
- for batch_idx, (data, target) in enumerate(self.test_loader):
- if target.dim() == 3:
- target = target.view(-1, target.shape[-1])
-
- # Move the data to the device
- for i in range(self.devices):
- device = torch.device('cuda', index=self.devices[i])
- data, target = data.to(device), target.to(device)
-
- # Forward propagation
- output = self.models[i](data)
-
- # Calculate the loss
- loss = self.criterion(output, target)
-
- # Update the test loss
- test_losses[i] += loss.item() * data.size(0)
- num_dataset += data.size(0)
-
- # Return the test loss
- return test_losses/num_dataset
diff --git a/python/coverage_control/nn/trainers/trainer.py b/python/coverage_control/nn/trainers/trainer.py
index 43aa313e..f89b543a 100644
--- a/python/coverage_control/nn/trainers/trainer.py
+++ b/python/coverage_control/nn/trainers/trainer.py
@@ -19,18 +19,36 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+"""
+Train a model using pytorch
+"""
+
import time
+
import torch
__all__ = ["TrainModel"]
-class TrainModel():
+
+## @ingroup python_api
+class TrainModel:
"""
Train a model using pytorch
"""
- def __init__(self, model: torch.nn.Module, train_loader: torch.utils.data.DataLoader, val_loader: torch.utils.data.DataLoader, optimizer: torch.optim.Optimizer, criterion: torch.nn.Module, epochs: int, device: torch.device, model_file: str, optimizer_file: str):
+ def __init__(
+ self,
+ model: torch.nn.Module,
+ train_loader: torch.utils.data.DataLoader,
+ val_loader: torch.utils.data.DataLoader,
+ optimizer: torch.optim.Optimizer,
+ criterion: torch.nn.Module,
+ epochs: int,
+ device: torch.device,
+ model_file: str,
+ optimizer_file: str,
+ ):
"""
Initialize the model trainer
@@ -67,7 +85,7 @@ def load_saved_model_dict(self, model_path: str) -> None:
def load_saved_model(self, model_path: str) -> None:
"""
Load the saved model
-
+
Args:
model_path: model path
"""
@@ -96,32 +114,38 @@ def train(self) -> None:
val_loss_history = []
start_time = time.time()
- model_path = self.model_file.split('.')[0]
+ model_path = self.model_file.split(".")[0]
# Train the model
+
for epoch in range(self.epochs):
# Training
train_loss = self.TrainEpoch()
train_loss_history.append(train_loss)
- torch.save(train_loss_history, model_path + '_train_loss.pt')
+ torch.save(train_loss_history, model_path + "_train_loss.pt")
# Print the loss
- print("Epoch: {}/{}.. ".format(epoch + 1, self.epochs),
- "Training Loss: {:.5f}.. ".format(train_loss))
-
+ print(
+ "Epoch: {}/{}.. ".format(epoch + 1, self.epochs),
+ "Training Loss: {:.5f}.. ".format(train_loss),
+ )
# Validation
+
if self.val_loader is not None:
val_loss = self.validate_epoch(self.val_loader)
val_loss_history.append(val_loss)
- torch.save(val_loss_history, model_path + '_val_loss.pt')
+ torch.save(val_loss_history, model_path + "_val_loss.pt")
# Save the best model
+
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(self.model, self.model_file)
torch.save(self.optimizer, self.optimizer_file)
- print("Epoch: {}/{}.. ".format(epoch + 1, self.epochs),
- "Validation Loss: {:.5f}.. ".format(val_loss),
- "Best Validation Loss: {:.5f}.. ".format(best_val_loss))
+ print(
+ "Epoch: {}/{}.. ".format(epoch + 1, self.epochs),
+ "Validation Loss: {:.5f}.. ".format(val_loss),
+ "Best Validation Loss: {:.5f}.. ".format(best_val_loss),
+ )
if train_loss < best_train_loss:
best_train_loss = train_loss
@@ -135,7 +159,6 @@ def train(self) -> None:
# Print elapsed time in minutes
print("Elapsed time: {:.2f} minutes".format(elapsed_time / 60))
-
# Train the model in batches
def TrainEpoch(self) -> float:
"""
@@ -152,6 +175,7 @@ def TrainEpoch(self) -> float:
num_dataset = 0
# Train the model in batches
+
for batch_idx, (data, target) in enumerate(self.train_loader):
# Move the data to the device
data, target = data.to(self.device), target.to(self.device)
@@ -169,6 +193,7 @@ def TrainEpoch(self) -> float:
loss = self.criterion(output, target)
# Print batch number and loss
+
if batch_idx % 10 == 0:
print("Batch: {}, Loss: {}".format(batch_idx, loss))
@@ -185,6 +210,7 @@ def TrainEpoch(self) -> float:
num_dataset += 1
# Return the training loss
+
return train_loss / num_dataset
# Validate the model in batches
@@ -227,6 +253,7 @@ def validate_epoch(self, data_loader: torch.utils.data.DataLoader) -> float:
num_dataset += 1
# Return the validation loss
+
return val_loss / num_dataset
# Test the model in batches
diff --git a/python/scripts/coverage_env/coverage_class.py b/python/scripts/coverage_env/coverage_class.py
index c8321228..96b72e35 100644
--- a/python/scripts/coverage_env/coverage_class.py
+++ b/python/scripts/coverage_env/coverage_class.py
@@ -19,6 +19,9 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+"""
+An example class to use the CoverageControl library to run a coverage algorithm
+"""
import sys
import coverage_control as cc # Main library
from coverage_control import CoverageSystem
@@ -31,6 +34,9 @@
from coverage_control.algorithms import ClairvoyantCVT as CoverageAlgorithm
class RunCoverageAlgorithm:
+ """
+ A class to run the coverage algorithm
+ """
def __init__(self, params_filename=None):
if params_filename is not None:
@@ -42,30 +48,36 @@ def __init__(self, params_filename=None):
self.controller = CoverageAlgorithm(self.params_, self.params_.pNumRobots, self.env)
def step(self):
+ """
+ Run one step of the coverage algorithm
+ """
self.controller.ComputeActions();
actions = self.controller.GetActions()
error_flag = self.env.StepActions(actions)
return error_flag
def execute(self):
+ """
+ Run the coverage algorithm
+ """
num_steps = 1
init_cost = self.env.GetObjectiveValue()
- print("Initial Coverage cost: " + str('{:.2e}'.format(init_cost)))
+ print(f"Initial Coverage cost: {init_cost:.2e}")
while num_steps <= self.params_.pEpisodeSteps:
if self.step():
- print("Error in step " + str(num_steps))
+ print(f"Error in step {num_steps}")
break
if self.controller.IsConverged():
- print("Converged in step " + str(num_steps))
+ print(f"Converged in step {num_steps}")
break
num_steps = num_steps + 1
final_cost = self.env.GetObjectiveValue()
- print("Improvement %: " + str('{:.2f}'.format(100 * (init_cost - final_cost)/init_cost)))
+ print(f"Improvement %: {100 * (init_cost - final_cost)/init_cost:.2f}")
if __name__ == '__main__':
diff --git a/python/scripts/coverage_env/coverage_simple.py b/python/scripts/coverage_env/coverage_simple.py
index 334adcc1..da8add1a 100644
--- a/python/scripts/coverage_env/coverage_simple.py
+++ b/python/scripts/coverage_env/coverage_simple.py
@@ -19,8 +19,12 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-import sys
+"""
+A simple example of using the CoverageControl library
+"""
+
import coverage_control as cc
+
# Algorithms available:
# ClairvoyantCVT
# CentralizedCVT
@@ -34,25 +38,27 @@
env = cc.CoverageSystem(params)
init_cost = env.GetObjectiveValue()
-print("Initial Coverage cost: " + str('{:.2e}'.format(init_cost)))
+print(f"Initial Coverage cost: {init_cost:.2e}")
# Runs the coverage control algorithm
controller = CoverageAlgorithm(params, env)
for i in range(0, params.pEpisodeSteps):
# Compute actions to be taken by the robots
- controller.ComputeActions();
+ controller.ComputeActions()
# Get actions from the controller
actions = controller.GetActions()
+
# Send actions to the environment
+
if env.StepActions(actions):
- print("Error in step " + str(i))
+ print(f"Error in step {i}")
break
if controller.IsConverged():
- print("Converged in step " + str(i))
+ print(f"Converged in step {i}")
break
# print some metrics
current_cost = env.GetObjectiveValue()
-print("Improvement %: " + str('{:.2f}'.format(100 * (init_cost - current_cost)/init_cost)))
+print(f"Improvement %: {100 * (init_cost - current_cost)/init_cost:.2f}")
diff --git a/python/scripts/data_generation/data_generation.py b/python/scripts/data_generation/data_generation.py
index 3c4561f2..98e32947 100644
--- a/python/scripts/data_generation/data_generation.py
+++ b/python/scripts/data_generation/data_generation.py
@@ -21,11 +21,10 @@
# @file data_generation.py
# This file contains the code to generate a dataset for learning
-# Prefer using simple_data_generation.py for generating a dataset
-#
+#
-# DataDir = "~/CoverageControl_ws/src/CoverageControl/" # Absolute path to the root of the repository
-# EnvironmentConfig = "params/coverage_control_params.toml" # Relative to DataDir
+# DataDir = "${CoverageControl_ws}/datasets/lpac" # Absolute location
+# EnvironmentConfig = "${CoverageControl_ws}/datasets/lpac/coverage_control_params.toml" # Absolute location
#
# NumDataset = 1000
#
@@ -35,7 +34,7 @@
#
# # The robots stop moving once the algorithm has converged
# # Having some of these converged steps can help in stabilizing robot actions
-# ConvergedDataRatio = 0.25
+# ConvergedDataRatio = 0.02
#
# # Resizing of maps and Sparsification of tensors are triggered every TriggerPostProcessing dataset
# # This should be set based on RAM resources available on the system
@@ -48,42 +47,53 @@
# [DataSetSplit]
# TrainRatio = 0.7
# ValRatio = 0.2
+# TestRatio = 0.1
+
+## @file data_generation.py
+# @brief Class to generate CoverageControl dataset for LPAC architecture
import os
import sys
-import torch
-import math
import pathlib
import datetime
+import math
import coverage_control
-from coverage_control import IOUtils
-from coverage_control import CoverageSystem
+import torch
+from coverage_control import CoverageSystem, IOUtils
from coverage_control.algorithms import ClairvoyantCVT as CoverageAlgorithm
from coverage_control.nn import CoverageEnvUtils
-class DatasetGenerator():
+## @ingroup python_api
+class DatasetGenerator:
+ """
+ Class to generate CoverageControl dataset for LPAC architecture
+ """
+
def __init__(self, config_file, append_dir=None):
self.config = IOUtils.load_toml(config_file)
- self.data_dir = IOUtils.sanitize_path(self.config['DataDir'])
- self.dataset_dir = self.data_dir + '/data/'
+ self.data_dir = IOUtils.sanitize_path(self.config["DataDir"])
+ self.dataset_dir = self.data_dir + "/data/"
+
if append_dir is not None:
self.dataset_dir += append_dir
if not pathlib.Path(self.data_dir).exists():
- print(f'{self.data_dir} does not exist')
- exit()
+ print(f"{self.data_dir} does not exist")
+ sys.exit()
self.dataset_dir_path = pathlib.Path(self.dataset_dir)
+
if not self.dataset_dir_path.exists():
os.makedirs(self.dataset_dir)
env_config_file = IOUtils.sanitize_path(self.config["EnvironmentConfig"])
env_config_file = pathlib.Path(env_config_file)
+
if not env_config_file.exists():
- print(f'{env_config_file} does not exist')
- exit()
+ print(f"{env_config_file} does not exist")
+ sys.exit()
self.env_params = coverage_control.Parameters(env_config_file.as_posix())
@@ -101,6 +111,7 @@ def __init__(self, config_file, append_dir=None):
self.every_num_step = self.config["EveryNumSteps"]
self.trigger_size = self.config["TriggerPostProcessing"]
self.converged_data_ratio = self.config["ConvergedDataRatio"]
+
if self.trigger_size == 0 or self.trigger_size > self.num_dataset:
self.trigger_size = self.num_dataset
@@ -112,59 +123,97 @@ def __init__(self, config_file, append_dir=None):
# Initialize tensors
self.actions = torch.zeros((self.num_dataset, self.num_robots, 2))
self.robot_positions = torch.zeros((self.num_dataset, self.num_robots, 2))
- self.raw_local_maps = torch.zeros((self.trigger_size, self.num_robots, self.env_params.pLocalMapSize, self.env_params.pLocalMapSize))
- self.raw_obstacle_maps = torch.zeros((self.trigger_size, self.num_robots, self.env_params.pLocalMapSize, self.env_params.pLocalMapSize))
- self.local_maps = torch.zeros((self.num_dataset, self.num_robots, self.cnn_map_size, self.cnn_map_size))
- self.obstacle_maps = torch.zeros((self.num_dataset, self.num_robots, self.cnn_map_size, self.cnn_map_size))
- self.comm_maps = torch.zeros((self.num_dataset, self.num_robots, 2, self.cnn_map_size, self.cnn_map_size))
+ self.raw_local_maps = torch.zeros(
+ (
+ self.trigger_size,
+ self.num_robots,
+ self.env_params.pLocalMapSize,
+ self.env_params.pLocalMapSize,
+ )
+ )
+ self.raw_obstacle_maps = torch.zeros(
+ (
+ self.trigger_size,
+ self.num_robots,
+ self.env_params.pLocalMapSize,
+ self.env_params.pLocalMapSize,
+ )
+ )
+ self.local_maps = torch.zeros(
+ (self.num_dataset, self.num_robots, self.cnn_map_size, self.cnn_map_size)
+ )
+ self.obstacle_maps = torch.zeros(
+ (self.num_dataset, self.num_robots, self.cnn_map_size, self.cnn_map_size)
+ )
+ self.comm_maps = torch.zeros(
+ (self.num_dataset, self.num_robots, 2, self.cnn_map_size, self.cnn_map_size)
+ )
self.coverage_features = torch.zeros((self.num_dataset, self.num_robots, 7))
- self.edge_weights = torch.zeros((self.num_dataset, self.num_robots, self.num_robots))
+ self.edge_weights = torch.zeros(
+ (self.num_dataset, self.num_robots, self.num_robots)
+ )
- # Write metrics
- self.metrics_file = self.dataset_dir_path / 'metrics.txt'
- self.metrics = open(self.metrics_file, 'w')
- # Get current time
start_time = datetime.datetime.now()
- self.metrics.write('Time: ' + str(datetime.datetime.now()) + '\n')
- self.metrics.write('Dataset directory: ' + self.dataset_dir + '\n')
- self.PrintTensorSizes()
- self.PrintTensorSizes(self.metrics)
- self.metrics.flush()
-
- self.RunDataGeneration()
- self.SaveDataset()
+ # Write metrics
+ self.metrics_file = self.dataset_dir_path / "metrics.txt"
+ # self.metrics = open(self.metrics_file, 'w')
+ with open(self.metrics_file, "w", encoding="utf-8") as f:
+ # f.write("Time: " + str(datetime.datetime.now()) + "\n")
+ f.write(f"Time: {start_time}\n")
+ f.write("Dataset directory: " + self.dataset_dir + "\n")
+ self.print_tensor_sizes(f)
+ f.flush()
+
+ self.print_tensor_sizes()
+
+ self.run_data_generation()
+ self.save_dataset()
end_time = datetime.datetime.now()
- self.metrics.write('Time: ' + str(datetime.datetime.now()) + '\n')
- self.metrics.write('Total time: ' + str(end_time - start_time) + '\n')
- self.metrics.close()
+ with open(self.metrics_file, "a", encoding="utf-8") as f:
+ f.write("Time: " + str(datetime.datetime.now()) + "\n")
+ f.write("Total time: " + str(end_time - start_time) + "\n")
-
- def RunDataGeneration(self):
+ def run_data_generation(self):
num_non_converged_env = 0
+
while self.dataset_count < self.num_dataset:
- self.env = CoverageSystem(self.env_params, self.env_params.pNumFeatures, self.num_robots)
+ self.env = CoverageSystem(
+ self.env_params, self.env_params.pNumFeatures, self.num_robots
+ )
self.alg = CoverageAlgorithm(self.env_params, self.num_robots, self.env)
self.env_count += 1
- print('Environment: ' + str(self.env_count))
+ print("Environment: " + str(self.env_count))
num_steps = 0
is_converged = False
- while num_steps < self.env_params.pEpisodeSteps and not is_converged and self.dataset_count < self.num_dataset:
+
+ while (
+ num_steps < self.env_params.pEpisodeSteps
+ and not is_converged
+ and self.dataset_count < self.num_dataset
+ ):
if num_steps % self.every_num_step == 0:
- is_converged = self.StepWithSave()
+ is_converged = self.step_with_save()
else:
- is_converged = self.StepWithoutSave()
+ is_converged = self.step_without_save()
num_steps += 1
+
if num_steps == self.env_params.pEpisodeSteps:
num_non_converged_env += 1
- print('Non-converged environment: ' + str(num_non_converged_env))
+ print("Non-converged environment: " + str(num_non_converged_env))
- num_converged_data = math.ceil(self.converged_data_ratio * num_steps / self.every_num_step)
+ num_converged_data = math.ceil(
+ self.converged_data_ratio * num_steps / self.every_num_step
+ )
converged_data_count = 0
- while converged_data_count < num_converged_data and self.dataset_count < self.num_dataset:
- self.StepWithSave()
+
+ while (
+ converged_data_count < num_converged_data
+ and self.dataset_count < self.num_dataset
+ ):
+ self.step_with_save()
converged_data_count += 1
- def StepWithSave(self):
+ def step_with_save(self):
self.alg.ComputeActions()
converged = self.alg.IsConverged()
actions = self.alg.GetActions()
@@ -172,132 +221,210 @@ def StepWithSave(self):
self.actions[count] = CoverageEnvUtils.to_tensor(actions)
self.robot_positions[count] = CoverageEnvUtils.get_robot_positions(self.env)
self.coverage_features[count] = CoverageEnvUtils.get_voronoi_features(self.env)
- self.raw_local_maps[self.trigger_count] = CoverageEnvUtils.get_raw_local_maps(self.env, self.env_params)
- self.raw_obstacle_maps[self.trigger_count] = CoverageEnvUtils.get_raw_obstacle_maps(self.env, self.env_params)
- self.comm_maps[count] = CoverageEnvUtils.get_communication_maps(self.env, self.env_params, self.cnn_map_size)
- self.edge_weights[count] = CoverageEnvUtils.get_weights(self.env, self.env_params)
+ self.raw_local_maps[self.trigger_count] = CoverageEnvUtils.get_raw_local_maps(
+ self.env, self.env_params
+ )
+ self.raw_obstacle_maps[self.trigger_count] = (
+ CoverageEnvUtils.get_raw_obstacle_maps(self.env, self.env_params)
+ )
+ self.comm_maps[count] = CoverageEnvUtils.get_communication_maps(
+ self.env, self.env_params, self.cnn_map_size
+ )
+ self.edge_weights[count] = CoverageEnvUtils.get_weights(
+ self.env, self.env_params
+ )
self.dataset_count += 1
+
if self.dataset_count % 100 == 0:
- print(f'Dataset: {self.dataset_count}/{self.num_dataset}')
+ print(f"Dataset: {self.dataset_count}/{self.num_dataset}")
self.trigger_count += 1
+
if self.trigger_count == self.trigger_size:
- self.TriggerPostProcessing()
+ self.trigger_post_processing()
self.trigger_count = 0
error_flag = self.env.StepActions(actions)
+
return converged or error_flag
- def TriggerPostProcessing(self):
- if self.trigger_start_idx > self.num_dataset -1:
+ def trigger_post_processing(self):
+ if self.trigger_start_idx > self.num_dataset - 1:
return
- trigger_end_idx = min(self.num_dataset, self.trigger_start_idx + self.trigger_size)
- raw_local_maps = self.raw_local_maps[0:trigger_end_idx - self.trigger_start_idx]
+ trigger_end_idx = min(
+ self.num_dataset, self.trigger_start_idx + self.trigger_size
+ )
+ raw_local_maps = self.raw_local_maps[
+ 0 : trigger_end_idx - self.trigger_start_idx
+ ]
raw_local_maps = raw_local_maps.to(self.device)
- resized_local_maps = CoverageEnvUtils.resize_maps(raw_local_maps, self.cnn_map_size)
- self.local_maps[self.trigger_start_idx:trigger_end_idx] = resized_local_maps.view(-1, self.num_robots, self.cnn_map_size, self.cnn_map_size).cpu().clone()
-
- raw_obstacle_maps = self.raw_obstacle_maps[0:trigger_end_idx - self.trigger_start_idx]
+ resized_local_maps = CoverageEnvUtils.resize_maps(
+ raw_local_maps, self.cnn_map_size
+ )
+ self.local_maps[self.trigger_start_idx : trigger_end_idx] = (
+ resized_local_maps.view(
+ -1, self.num_robots, self.cnn_map_size, self.cnn_map_size
+ )
+ .cpu()
+ .clone()
+ )
+
+ raw_obstacle_maps = self.raw_obstacle_maps[
+ 0 : trigger_end_idx - self.trigger_start_idx
+ ]
raw_obstacle_maps = raw_obstacle_maps.to(self.device)
- resized_obstacle_maps = CoverageEnvUtils.resize_maps(raw_obstacle_maps, self.cnn_map_size)
- self.obstacle_maps[self.trigger_start_idx:trigger_end_idx] = resized_obstacle_maps.view(-1, self.num_robots, self.cnn_map_size, self.cnn_map_size).cpu().clone()
+ resized_obstacle_maps = CoverageEnvUtils.resize_maps(
+ raw_obstacle_maps, self.cnn_map_size
+ )
+ self.obstacle_maps[self.trigger_start_idx : trigger_end_idx] = (
+ resized_obstacle_maps.view(
+ -1, self.num_robots, self.cnn_map_size, self.cnn_map_size
+ )
+ .cpu()
+ .clone()
+ )
self.trigger_start_idx = trigger_end_idx
- def NormalizeTensor(self, tensor):
+ def normalize_tensor(self, tensor):
tensor_mean = tensor.mean(dim=[0, 1])
tensor_std = tensor.std(dim=[0, 1])
tensor = (tensor - tensor_mean) / tensor_std
+
return tensor, tensor_mean, tensor_std
- def NormalizeCommunicationMaps(self):
+ def normalize_communication_maps(self):
min_val = self.comm_maps.min()
max_val = self.comm_maps.max()
range_val = max_val - min_val
self.comm_maps = (self.comm_maps - min_val) / range_val
- print('Communication map min: ' + str(min_val))
- print('Communication map max: ' + str(max_val))
+ print("Communication map min: " + str(min_val))
+ print("Communication map max: " + str(max_val))
+
return min_val, range_val
- def SaveTensor(self, tensor, name, as_sparse=False):
+ def save_tensor(self, tensor, name, as_sparse=False):
tensor = tensor.cpu()
- train_tensor = tensor[0:self.train_size].clone()
- validation_tensor = tensor[self.train_size:self.train_size + self.validation_size].clone()
- test_tensor = tensor[self.train_size + self.validation_size:].clone()
+ train_tensor = tensor[0 : self.train_size].clone()
+ validation_tensor = tensor[
+ self.train_size : self.train_size + self.validation_size
+ ].clone()
+ test_tensor = tensor[self.train_size + self.validation_size :].clone()
+
if as_sparse:
train_tensor = train_tensor.to_sparse()
validation_tensor = validation_tensor.to_sparse()
test_tensor = test_tensor.to_sparse()
dataset_dir_path = pathlib.Path(self.dataset_dir)
- torch.save(train_tensor, dataset_dir_path / 'train/' / name)
- torch.save(validation_tensor, dataset_dir_path / 'val/' / name)
- torch.save(test_tensor, dataset_dir_path / 'test/' / name)
-
- def SaveDataset(self):
- as_sparse = self.config['SaveAsSparseQ']
- self.train_size = int(self.num_dataset * self.config['DataSetSplit']['TrainRatio'])
- self.validation_size = int(self.num_dataset * self.config['DataSetSplit']['ValRatio'])
+ torch.save(train_tensor, dataset_dir_path / "train/" / name)
+ torch.save(validation_tensor, dataset_dir_path / "val/" / name)
+ torch.save(test_tensor, dataset_dir_path / "test/" / name)
+
+ def save_dataset(self):
+ as_sparse = self.config["SaveAsSparseQ"]
+ self.train_size = int(
+ self.num_dataset * self.config["DataSetSplit"]["TrainRatio"]
+ )
+ self.validation_size = int(
+ self.num_dataset * self.config["DataSetSplit"]["ValRatio"]
+ )
self.test_size = self.num_dataset - self.train_size - self.validation_size
# Make sure the folder exists
- if not os.path.exists(self.dataset_dir + '/train'):
- os.makedirs(self.dataset_dir + '/train')
- if not os.path.exists(self.dataset_dir + '/val'):
- os.makedirs(self.dataset_dir + '/val')
- if not os.path.exists(self.dataset_dir + '/test'):
- os.makedirs(self.dataset_dir + '/test')
-
- self.SaveTensor(self.robot_positions, 'robot_positions.pt')
- self.SaveTensor(self.local_maps, 'local_maps.pt', as_sparse)
- self.SaveTensor(self.obstacle_maps, 'obstacle_maps.pt', as_sparse)
- self.SaveTensor(self.edge_weights, 'edge_weights.pt', as_sparse)
-
- # min_val, range_val = self.NormalizeCommunicationMaps()
- self.SaveTensor(self.comm_maps, 'comm_maps.pt', as_sparse)
- # torch.save(min_val, self.dataset_dir / 'comm_maps_min.pt')
- # torch.save(range_val, self.dataset_dir / 'comm_maps_range.pt')
- self.SaveTensor(self.actions, 'actions.pt')
- self.SaveTensor(self.coverage_features, 'coverage_features.pt')
+ if not os.path.exists(self.dataset_dir + "/train"):
+ os.makedirs(self.dataset_dir + "/train")
+
+ if not os.path.exists(self.dataset_dir + "/val"):
+ os.makedirs(self.dataset_dir + "/val")
- if self.config['NormalizeQ']:
- normalized_actions, actions_mean, actions_std = self.NormalizeTensor(self.actions)
- coverage_features, coverage_features_mean, coverage_features_std = self.NormalizeTensor(self.coverage_features)
- self.SaveTensor(normalized_actions, 'normalized_actions.pt')
- self.SaveTensor(coverage_features, 'normalized_coverage_features.pt')
- torch.save(actions_mean, self.dataset_dir_path / 'actions_mean.pt')
- torch.save(actions_std, self.dataset_dir_path / 'actions_std.pt')
- torch.save(coverage_features_mean, self.dataset_dir_path / 'coverage_features_mean.pt')
- torch.save(coverage_features_std, self.dataset_dir_path / 'coverage_features_std.pt')
+ if not os.path.exists(self.dataset_dir + "/test"):
+ os.makedirs(self.dataset_dir + "/test")
+ self.save_tensor(self.robot_positions, "robot_positions.pt")
+ self.save_tensor(self.local_maps, "local_maps.pt", as_sparse)
+ self.save_tensor(self.obstacle_maps, "obstacle_maps.pt", as_sparse)
+ self.save_tensor(self.edge_weights, "edge_weights.pt", as_sparse)
+
+ # min_val, range_val = self.normalize_communication_maps()
+ self.save_tensor(self.comm_maps, "comm_maps.pt", as_sparse)
+ # torch.save(min_val, self.dataset_dir / 'comm_maps_min.pt')
+ # torch.save(range_val, self.dataset_dir / 'comm_maps_range.pt')
- def StepWithoutSave(self):
+ self.save_tensor(self.actions, "actions.pt")
+ self.save_tensor(self.coverage_features, "coverage_features.pt")
+
+ if self.config["NormalizeQ"]:
+ normalized_actions, actions_mean, actions_std = self.normalize_tensor(
+ self.actions
+ )
+ coverage_features, coverage_features_mean, coverage_features_std = (
+ self.normalize_tensor(self.coverage_features)
+ )
+ self.save_tensor(normalized_actions, "normalized_actions.pt")
+ self.save_tensor(coverage_features, "normalized_coverage_features.pt")
+ torch.save(actions_mean, self.dataset_dir_path / "actions_mean.pt")
+ torch.save(actions_std, self.dataset_dir_path / "actions_std.pt")
+ torch.save(
+ coverage_features_mean,
+ self.dataset_dir_path / "coverage_features_mean.pt",
+ )
+ torch.save(
+ coverage_features_std,
+ self.dataset_dir_path / "coverage_features_std.pt",
+ )
+
+ def step_without_save(self):
self.alg.ComputeActions()
converged = self.alg.IsConverged()
- if (self.env.StepActions(self.alg.GetActions())):
+
+ if self.env.StepActions(self.alg.GetActions()):
return True
+
return converged
- def GetTensorByteSizeMB(self, tensor):
+ def get_tensor_byte_size_mb(self, tensor):
return (tensor.element_size() * tensor.nelement()) / (1024 * 1024)
- def PrintTensorSizes(self, file=sys.stdout):
+ def print_tensor_sizes(self, file=sys.stdout):
# Set to two decimal places
- print('Tensor sizes:', file=file)
- print('Actions:', self.GetTensorByteSizeMB(self.actions), file=file)
- print('Robot positions:', self.GetTensorByteSizeMB(self.robot_positions), file=file)
- print('Raw local maps:', self.GetTensorByteSizeMB(self.raw_local_maps), file=file)
- print('Raw obstacle maps:', self.GetTensorByteSizeMB(self.raw_obstacle_maps), file=file)
- print('Local maps:', self.GetTensorByteSizeMB(self.local_maps), file=file)
- print('Obstacle maps:', self.GetTensorByteSizeMB(self.obstacle_maps), file=file)
- print('Comm maps:', self.GetTensorByteSizeMB(self.comm_maps), file=file)
- print('Coverage features:', self.GetTensorByteSizeMB(self.coverage_features), file=file)
-
-if __name__ == '__main__':
+ print("Tensor sizes:", file=file)
+ print("Actions:", self.get_tensor_byte_size_mb(self.actions), file=file)
+ print(
+ "Robot positions:",
+ self.get_tensor_byte_size_mb(self.robot_positions),
+ file=file,
+ )
+ print(
+ "Raw local maps:",
+ self.get_tensor_byte_size_mb(self.raw_local_maps),
+ file=file,
+ )
+ print(
+ "Raw obstacle maps:",
+ self.get_tensor_byte_size_mb(self.raw_obstacle_maps),
+ file=file,
+ )
+ print("Local maps:", self.get_tensor_byte_size_mb(self.local_maps), file=file)
+ print(
+ "Obstacle maps:",
+ self.get_tensor_byte_size_mb(self.obstacle_maps),
+ file=file,
+ )
+ print("Comm maps:", self.get_tensor_byte_size_mb(self.comm_maps), file=file)
+ print(
+ "Coverage features:",
+ self.get_tensor_byte_size_mb(self.coverage_features),
+ file=file,
+ )
+
+
+if __name__ == "__main__":
config_file = sys.argv[1]
+
if len(sys.argv) > 2:
append_folder = sys.argv[2]
else:
append_folder = None
- DatasetGenerator(config_file)
+ DatasetGenerator(config_file, append_folder)
diff --git a/python/scripts/data_generation/simple_data_generation.py b/python/scripts/data_generation/simple_data_generation.py
index 0d3d822c..f8aa811d 100644
--- a/python/scripts/data_generation/simple_data_generation.py
+++ b/python/scripts/data_generation/simple_data_generation.py
@@ -21,11 +21,11 @@
# @file simple_data_generation.py
# This file contains the code to generate a dataset for learning
-#
+#
# Uses the following configuration given in a toml file:
-# DataDir = "~/CoverageControl_ws/src/CoverageControl/" # Absolute path to the root of the repository
-# EnvironmentConfig = "params/coverage_control_params.toml" # Relative to DataDir
+# DataDir = "${CoverageControl_ws}/datasets/lpac" # Absolute location
+# EnvironmentConfig = "${CoverageControl_ws}/datasets/lpac/coverage_control_params.toml" # Absolute location
#
# NumDataset = 1000
#
@@ -35,7 +35,7 @@
#
# # The robots stop moving once the algorithm has converged
# # Having some of these converged steps can help in stabilizing robot actions
-# ConvergedDataRatio = 0.25
+# ConvergedDataRatio = 0.02
#
# # Resizing of maps and Sparsification of tensors are triggered every TriggerPostProcessing dataset
# # This should be set based on RAM resources available on the system
@@ -43,6 +43,9 @@
#
# CNNMapSize = 32
+## @file simple_data_generation.py
+# @brief Generates a dataset for coverage control learning
+
import os
import sys
import torch
@@ -55,7 +58,8 @@
from coverage_control.algorithms import ClairvoyantCVT as CoverageAlgorithm
from coverage_control.nn import CoverageEnvUtils
-class DatasetGenerator():
+## @ingroup python_api
+class SimpleDatasetGenerator():
"""
Class for generating dataset for learning
"""
@@ -252,4 +256,4 @@ def print_tensor_sizes(self, file=sys.stdout):
append_folder = sys.argv[2]
else:
append_folder = None
- DatasetGenerator(config_file, append_folder)
+ SimpleDatasetGenerator(config_file, append_folder)
diff --git a/python/scripts/evaluators/controller.py b/python/scripts/evaluators/controller.py
index 74791dcc..73d7185a 100644
--- a/python/scripts/evaluators/controller.py
+++ b/python/scripts/evaluators/controller.py
@@ -19,22 +19,26 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-
-import torch
-
-import coverage_control as cc# Main library
-from coverage_control import IOUtils
-from coverage_control import CoverageSystem
-from coverage_control import PointVector, Parameters, WorldIDF
-from coverage_control.algorithms import NearOptimalCVT, DecentralizedCVT, ClairvoyantCVT, CentralizedCVT
+## @file controller.py
+# @brief Base classes for CVT and neural network based controllers
import coverage_control.nn as cc_nn
+import torch
+from coverage_control import CoverageSystem, IOUtils, Parameters, PointVector
+from coverage_control.algorithms import (
+ CentralizedCVT,
+ ClairvoyantCVT,
+ DecentralizedCVT,
+ NearOptimalCVT,
+)
from coverage_control.nn import CoverageEnvUtils
+
class ControllerCVT:
"""
Controller class for CVT based controllers
"""
+
def __init__(self, config: dict, params: Parameters, env: CoverageSystem):
"""
Constructor for the CVT controller
@@ -57,7 +61,7 @@ def __init__(self, config: dict, params: Parameters, env: CoverageSystem):
case _:
raise ValueError(f"Unknown controller type: {controller_type}")
- def Step(self, env: CoverageSystem) -> (float, bool):
+ def step(self, env: CoverageSystem) -> (float, bool):
"""
Step function for the CVT controller
@@ -74,14 +78,18 @@ def Step(self, env: CoverageSystem) -> (float, bool):
actions = self.alg.GetActions()
converged = self.alg.IsConverged()
error_flag = env.StepActions(actions)
+
if error_flag:
raise ValueError("Error in step")
+
return env.GetObjectiveValue(), converged
+
class ControllerNN:
"""
Controller class for neural network based controllers
"""
+
def __init__(self, config: dict, params: Parameters, env: CoverageSystem):
"""
Constructor for the neural network controller
@@ -103,8 +111,10 @@ def __init__(self, config: dict, params: Parameters, env: CoverageSystem):
if "ModelFile" in self.config:
self.model_file = IOUtils.sanitize_path(self.config["ModelFile"])
self.model = torch.load(self.model_file)
- else: # Load from ModelStateDict
- self.learning_params_file = IOUtils.sanitize_path(self.config["LearningParams"])
+ else: # Load from ModelStateDict
+ self.learning_params_file = IOUtils.sanitize_path(
+ self.config["LearningParams"]
+ )
self.learning_params = IOUtils.load_toml(self.learning_params_file)
self.model = cc_nn.LPAC(self.learning_params).to(self.device)
self.model.load_model(IOUtils.sanitize_path(self.config["ModelStateDict"]))
@@ -114,9 +124,9 @@ def __init__(self, config: dict, params: Parameters, env: CoverageSystem):
self.model = self.model.to(self.device)
self.model.eval()
- def Step(self, env):
+ def step(self, env):
"""
- Step function for the neural network controller
+ step function for the neural network controller
Performs three steps:
1. Get the data from the environment
@@ -127,12 +137,13 @@ def Step(self, env):
Returns:
Objective value and convergence flag
"""
- pyg_data = CoverageEnvUtils.get_torch_geometric_data(env, self.params, True, self.use_comm_map, self.cnn_map_size).to(self.device)
+ pyg_data = CoverageEnvUtils.get_torch_geometric_data(
+ env, self.params, True, self.use_comm_map, self.cnn_map_size
+ ).to(self.device)
with torch.no_grad():
actions = self.model(pyg_data)
actions = actions * self.actions_std + self.actions_mean
point_vector_actions = PointVector(actions.cpu().numpy())
env.StepActions(point_vector_actions)
- return env.GetObjectiveValue(), False
-
+ return env.GetObjectiveValue(), False
diff --git a/python/scripts/evaluators/eval.py b/python/scripts/evaluators/eval.py
index e0625274..4745c474 100644
--- a/python/scripts/evaluators/eval.py
+++ b/python/scripts/evaluators/eval.py
@@ -19,23 +19,31 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+## @file eval.py
+# @brief Evaluates the performance of the controllers on a set of environments
+
import os
import sys
-import numpy as np
-
-import torch
import coverage_control as cc
-from coverage_control import IOUtils
-from coverage_control import CoverageSystem
-from coverage_control import Parameters, WorldIDF
+import numpy as np
+from coverage_control import CoverageSystem, IOUtils, WorldIDF
+
from controller import ControllerCVT, ControllerNN
+
+## @ingroup python_api
class Evaluator:
- def __init__(self, config):
- self.config = config
+ """
+ Evaluates the performance of the controllers on a set of environments
+ """
+
+ def __init__(self, in_config):
+ self.config = in_config
self.eval_dir = IOUtils.sanitize_path(self.config["EvalDir"])
self.env_dir = IOUtils.sanitize_path(self.config["EnvironmentDataDir"])
+ self.controller_dir = None
+
if not os.path.exists(self.env_dir):
os.makedirs(self.env_dir)
@@ -43,9 +51,10 @@ def __init__(self, config):
self.controllers_configs = self.config["Controllers"]
for controller_config in self.controllers_configs:
- controller_dir = self.eval_dir + "/" + controller_config["Name"]
- if not os.path.exists(controller_dir):
- os.makedirs(controller_dir)
+ c_dir = self.eval_dir + "/" + controller_config["Name"]
+
+ if not os.path.exists(c_dir):
+ os.makedirs(c_dir)
self.env_config_file = IOUtils.sanitize_path(self.config["EnvironmentConfig"])
self.env_config = IOUtils.load_toml(self.env_config_file)
@@ -56,23 +65,28 @@ def __init__(self, config):
self.num_envs = self.config["NumEnvironments"]
self.num_steps = self.config["NumSteps"]
- def Evaluate(self, save = True):
+ def evaluate(self, save=True):
dataset_count = 0
cost_data = np.zeros((self.num_controllers, self.num_envs, self.num_steps))
+
while dataset_count < self.num_envs:
print(f"Environment {dataset_count}")
pos_file = self.env_dir + "/" + str(dataset_count) + ".pos"
env_file = self.env_dir + "/" + str(dataset_count) + ".env"
+
if os.path.isfile(env_file) and os.path.isfile(pos_file):
world_idf = WorldIDF(self.cc_params, env_file)
env_main = CoverageSystem(self.cc_params, world_idf, pos_file)
else:
print(f"Creating new environment {dataset_count}")
- env_main = CoverageSystem(self.cc_params, self.num_features, self.num_robots)
+ env_main = CoverageSystem(
+ self.cc_params, self.num_features, self.num_robots
+ )
env_main.WriteEnvironment(pos_file, env_file)
world_idf = env_main.GetWorldIDFObject()
- robot_init_pos = env_main.GetRobotPositions(force_no_noise = True)
+ robot_init_pos = env_main.GetRobotPositions(force_no_noise=True)
+
for controller_id in range(self.num_controllers):
step_count = 0
env = CoverageSystem(self.cc_params, world_idf, robot_init_pos)
@@ -87,30 +101,56 @@ def Evaluate(self, save = True):
Controller = ControllerNN
else:
Controller = ControllerCVT
- controller = Controller(self.controllers_configs[controller_id], self.cc_params, env)
- cost_data[controller_id, dataset_count, step_count] = env.GetObjectiveValue()
+ controller = Controller(
+ self.controllers_configs[controller_id], self.cc_params, env
+ )
+ initial_objective_value = env.GetObjectiveValue()
+ cost_data[controller_id, dataset_count, step_count] = (
+ env.GetObjectiveValue() / initial_objective_value
+ )
step_count = step_count + 1
+
while step_count < self.num_steps:
- objective_value, converged = controller.Step(env)
- cost_data[controller_id, dataset_count, step_count] = objective_value
+ objective_value, converged = controller.step(env)
+ cost_data[controller_id, dataset_count, step_count] = (
+ objective_value / initial_objective_value
+ )
+
if converged:
- cost_data[controller_id, dataset_count, step_count:] = objective_value
+ cost_data[controller_id, dataset_count, step_count:] = (
+ objective_value / initial_objective_value
+ )
+
break
# env.PlotMapVoronoi(map_dir, step_count)
# env.RecordPlotData()
step_count = step_count + 1
- if step_count % 100 == 0:
- print(f"Step {step_count}, Objective Value {objective_value}")
- print(f"Environment {dataset_count}, {controller.name}, Step {step_count}")
- if save == True:
- self.controller_dir = self.eval_dir + "/" + self.controllers_configs[controller_id]["Name"]
+ if step_count % 100 == 0:
+ print(
+ f"Step {step_count}, Objective Value {cost_data[controller_id, dataset_count, step_count - 1]}"
+ )
+ print(
+ f"Environment {dataset_count}, {controller.name}, Step {step_count}"
+ )
+
+ if save is True:
+ self.controller_dir = (
+ self.eval_dir
+ + "/"
+ + self.controllers_configs[controller_id]["Name"]
+ )
controller_data_file = self.controller_dir + "/" + "eval.csv"
- np.savetxt(controller_data_file, cost_data[controller_id, :dataset_count + 1, :], delimiter=",")
+ np.savetxt(
+ controller_data_file,
+ cost_data[controller_id, : dataset_count + 1, :],
+ delimiter=",",
+ )
# env.RenderRecordedMap(self.eval_dir + "/" + self.controllers[controller_id]["Name"] + "/", "video.mp4")
del controller
del env
dataset_count = dataset_count + 1
+
return cost_data
@@ -120,4 +160,4 @@ def Evaluate(self, save = True):
config = IOUtils.load_toml(config_file)
evaluator = Evaluator(config)
- evaluator.Evaluate()
+ evaluator.evaluate()
diff --git a/python/scripts/evaluators/eval_single_dataset.py b/python/scripts/evaluators/eval_single_env.py
similarity index 74%
rename from python/scripts/evaluators/eval_single_dataset.py
rename to python/scripts/evaluators/eval_single_env.py
index 0b2e118e..042b4bfa 100644
--- a/python/scripts/evaluators/eval_single_dataset.py
+++ b/python/scripts/evaluators/eval_single_env.py
@@ -19,30 +19,38 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+## @file eval_single_env.py
+# @brief Evaluate a single dataset with multiple controllers
+
import os
import sys
-import numpy as np
-
-import torch
import coverage_control as cc
-from coverage_control import IOUtils
-from coverage_control import CoverageSystem
-from coverage_control import Parameters, WorldIDF
+import numpy as np
+from coverage_control import CoverageSystem, IOUtils, WorldIDF
+
from controller import ControllerCVT, ControllerNN
+
+## @ingroup python_api
class EvaluatorSingle:
- def __init__(self, config):
- self.config = config
+ """
+ Class to evaluate a single environment with multiple controllers
+ """
+
+ def __init__(self, in_config):
+ self.config = in_config
self.eval_dir = IOUtils.sanitize_path(self.config["EvalDir"]) + "/"
self.env_dir = IOUtils.sanitize_path(self.config["EnvironmentDataDir"]) + "/"
self.feature_file = self.env_dir + self.config["FeatureFile"]
self.pos_file = self.env_dir + self.config["RobotPosFile"]
+
if not os.path.exists(self.env_dir):
os.makedirs(self.env_dir)
if not os.path.exists(self.feature_file):
raise ValueError(f"Feature file {self.feature_file} does not exist")
+
if not os.path.exists(self.pos_file):
raise ValueError(f"Robot position file {self.pos_file} does not exist")
@@ -60,15 +68,15 @@ def __init__(self, config):
self.plot_map = self.config["PlotMap"]
self.generate_video = self.config["GenerateVideo"]
- def Evaluate(self, save = True):
+ def evaluate(self, save=True):
cost_data = np.zeros((self.num_controllers, self.num_steps))
world_idf = WorldIDF(self.cc_params, self.feature_file)
env_main = CoverageSystem(self.cc_params, world_idf, self.pos_file)
- robot_init_pos = env_main.GetRobotPositions(force_no_noise = True)
+ robot_init_pos = env_main.GetRobotPositions(force_no_noise=True)
if self.plot_map:
map_dir = self.eval_dir + "/plots/"
- os.makedirs(map_dir, exist_ok = True)
+ os.makedirs(map_dir, exist_ok=True)
env_main.PlotInitMap(map_dir, "InitMap")
for controller_id in range(self.num_controllers):
@@ -83,36 +91,57 @@ def Evaluate(self, save = True):
Controller = ControllerNN
else:
Controller = ControllerCVT
- controller = Controller(self.controllers_configs[controller_id], self.cc_params, env)
- cost_data[controller_id, step_count] = env.GetObjectiveValue()
+ controller = Controller(
+ self.controllers_configs[controller_id], self.cc_params, env
+ )
+ initial_objective_value = env.GetObjectiveValue()
+ cost_data[controller_id, step_count] = (
+ env.GetObjectiveValue() / initial_objective_value
+ )
step_count = step_count + 1
+
while step_count < self.num_steps:
- objective_value, converged = controller.Step(env)
- cost_data[controller_id, step_count] = objective_value
+ objective_value, converged = controller.step(env)
+ cost_data[controller_id, step_count] = (
+ objective_value / initial_objective_value
+ )
+
if converged and not self.generate_video:
- cost_data[controller_id, step_count:] = objective_value
+ cost_data[controller_id, step_count:] = (
+ objective_value / initial_objective_value
+ )
+
break
+
if self.generate_video:
env.RecordPlotData()
step_count = step_count + 1
+
if step_count % 100 == 0:
- print(f"Step {step_count}, Objective Value {objective_value}")
+ print(
+ f"Step {step_count}, Objective Value {cost_data[controller_id, step_count - 1]}"
+ )
print(f"{controller.name}, Step {step_count}")
- if save == True:
+ if save is True:
controller_dir = self.eval_dir + "/" + controller_name
+
if not os.path.exists(controller_dir):
os.makedirs(controller_dir)
controller_data_file = controller_dir + "/" + "eval.csv"
- np.savetxt(controller_data_file, cost_data[controller_id, :], delimiter=",")
+ np.savetxt(
+ controller_data_file, cost_data[controller_id, :], delimiter=","
+ )
+
if self.generate_video:
controller_dir = self.eval_dir + "/" + controller_name
env.RenderRecordedMap(controller_dir, "video.mp4")
del controller
del env
+
return cost_data
@@ -122,4 +151,4 @@ def Evaluate(self, save = True):
config = IOUtils.load_toml(config_file)
evaluator = EvaluatorSingle(config)
- evaluator.Evaluate()
+ evaluator.evaluate()
diff --git a/python/scripts/training/train_cnn.py b/python/scripts/training/train_cnn.py
index 3dfc384e..8901c5ab 100644
--- a/python/scripts/training/train_cnn.py
+++ b/python/scripts/training/train_cnn.py
@@ -20,20 +20,13 @@
# CoverageControl library. If not, see .
import os
-import sys
import pathlib
+import sys
+
import torch
import torch_geometric
-
-import coverage_control as cc
-import coverage_control.nn as cc_nn
-from coverage_control import PointVector
from coverage_control import IOUtils
-from coverage_control.nn import CoverageEnvUtils
-from coverage_control.nn import CNN
-from coverage_control.nn import LocalMapCNNDataset
-from coverage_control.nn import TrainModel
-
+from coverage_control.nn import CNN, LocalMapCNNDataset, TrainModel
# Set the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -41,11 +34,12 @@
config_file = sys.argv[1]
config = IOUtils.load_toml(config_file)
num_workers = config["NumWorkers"]
-dataset_path = pathlib.Path(IOUtils.sanitize_path(config['DataDir']))
+dataset_path = pathlib.Path(IOUtils.sanitize_path(config["DataDir"]))
data_dir = dataset_path / "data/"
cnn_model = config["CNNModel"]
model_dir = IOUtils.sanitize_path(cnn_model["Dir"]) + "/"
+
if not os.path.exists(model_dir):
os.makedirs(model_dir)
@@ -65,26 +59,44 @@
model = CNN(cnn_config).to(device)
-train_dataset = LocalMapCNNDataset(data_dir, "train", use_comm_map, output_dim)
-val_dataset = LocalMapCNNDataset(data_dir, "val", use_comm_map, output_dim)
-test_dataset = LocalMapCNNDataset(data_dir, "test", use_comm_map, output_dim)
+train_dataset = LocalMapCNNDataset(str(data_dir), "train", use_comm_map, output_dim)
+val_dataset = LocalMapCNNDataset(str(data_dir), "val", use_comm_map, output_dim)
+test_dataset = LocalMapCNNDataset(str(data_dir), "test", use_comm_map, output_dim)
model.register_buffer("target_mean", train_dataset.targets_mean)
model.register_buffer("target_Std", train_dataset.targets_std)
print("Loaded datasets")
-print("Train dataset size: {}".format(len(train_dataset)))
-
-train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-
-optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
+print(f"Train dataset size: {len(train_dataset)}")
+
+train_loader = torch.utils.data.DataLoader(
+ train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers
+)
+val_loader = torch.utils.data.DataLoader(
+ val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
+)
+test_loader = torch.utils.data.DataLoader(
+ test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
+)
+
+optimizer = torch.optim.SGD(
+ model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay
+)
# Use mse loss for regression
criterion = torch.nn.MSELoss()
-trainer = TrainModel(model, train_loader, val_loader, optimizer, criterion, num_epochs, device, model_file, optimizer_file)
+trainer = TrainModel(
+ model,
+ train_loader,
+ val_loader,
+ optimizer,
+ criterion,
+ num_epochs,
+ device,
+ model_file,
+ optimizer_file,
+)
# trainer.LoadSavedModel(model_file)
# trainer.LoadSavedOptimizer(optimizer_file)
@@ -92,4 +104,4 @@
test_loss = trainer.Test()
torch.save(test_loss, model_dir + "/test_loss.pt")
-print("Test loss: {}".format(test_loss))
+print(f"Test loss: {test_loss}")
diff --git a/python/scripts/training/train_gnn.py b/python/scripts/training/train_lpac.py
similarity index 73%
rename from python/scripts/training/train_gnn.py
rename to python/scripts/training/train_lpac.py
index 394615b4..6d7cc128 100644
--- a/python/scripts/training/train_gnn.py
+++ b/python/scripts/training/train_lpac.py
@@ -19,20 +19,20 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+"""
+Train the LPAC model
+"""
+## @file train_lpac.py
+# @brief Train the LPAC model
+
import os
-import sys
import pathlib
+import sys
+
import torch
import torch_geometric
-
-import coverage_control as cc
-import coverage_control.nn as cc_nn
-from coverage_control import PointVector
from coverage_control import IOUtils
-from coverage_control.nn import CoverageEnvUtils
-from coverage_control.nn import LPAC
-from coverage_control.nn import CNNGNNDataset
-from coverage_control.nn import TrainModel
+from coverage_control.nn import LPAC, CNNGNNDataset, TrainModel
# Set the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -41,11 +41,12 @@
world_size = int(sys.argv[2])
config = IOUtils.load_toml(config_file)
num_workers = config["NumWorkers"]
-dataset_path = pathlib.Path(IOUtils.sanitize_path(config['DataDir']))
+dataset_path = pathlib.Path(IOUtils.sanitize_path(config["DataDir"]))
data_dir = dataset_path / "data/"
lpac_model = config["LPACModel"]
model_dir = IOUtils.sanitize_path(lpac_model["Dir"]) + "/"
+
if not os.path.exists(model_dir):
os.makedirs(model_dir)
@@ -65,8 +66,12 @@
# cnn_pretrained_model = config["CNNModel"]["Dir"] + config["CNNModel"]["Model"]
# model.LoadCNNBackBone(cnn_pretrained_model)
-if "PreTrainedModel" in config["LPACModel"]:
- lpac_pretrained_model = IOUtils.sanitize_path(config["LPACModel"]["Dir"]) + config["LPACModel"]["PreTrainedModel"]
+
+if "PreTrainedModel" in config["LPACModel"]:
+ lpac_pretrained_model = (
+ IOUtils.sanitize_path(config["LPACModel"]["Dir"])
+ + config["LPACModel"]["PreTrainedModel"]
+ )
model.load_model(lpac_pretrained_model)
train_dataset = CNNGNNDataset(data_dir, "train", use_comm_map, world_size)
@@ -77,18 +82,34 @@
model.register_buffer("actions_std", train_dataset.targets_std)
print("Loaded datasets")
-print("Train dataset size: {}".format(len(train_dataset)))
+print(f"Train dataset size: {len(train_dataset)}")
-train_loader = torch_geometric.loader.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-val_loader = torch_geometric.loader.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
+train_loader = torch_geometric.loader.DataLoader(
+ train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers
+)
+val_loader = torch_geometric.loader.DataLoader(
+ val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
+)
# optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
-optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
+optimizer = torch.optim.Adam(
+ model.parameters(), lr=learning_rate, weight_decay=weight_decay
+)
# Use mse loss for regression
criterion = torch.nn.MSELoss()
-trainer = TrainModel(model, train_loader, None, optimizer, criterion, num_epochs, device, model_file, optimizer_file)
+trainer = TrainModel(
+ model,
+ train_loader,
+ None,
+ optimizer,
+ criterion,
+ num_epochs,
+ device,
+ model_file,
+ optimizer_file,
+)
# trainer = TrainModel(model, train_loader, val_loader, optimizer, criterion, num_epochs, device, model_file, optimizer_file)
# trainer.load_saved_model(model_file)
# trainer.load_saved_optimizer(optimizer_file)
@@ -98,4 +119,4 @@
# test_dataset = CNNGNNDataset(data_dir, "test", use_comm_map, world_size)
# test_loader = torch_geometric.loader.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=24)
# test_loss = trainer.Test(test_loader)
-# print("Test loss: {}".format(test_loss))
+# print(f"Test loss: {test_loss}")
diff --git a/python/tests/test_coverage.py b/python/tests/test_coverage.py
index 38b40d0d..35bf937a 100644
--- a/python/tests/test_coverage.py
+++ b/python/tests/test_coverage.py
@@ -19,16 +19,14 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-import test as m
import coverage_control as cc
+
# Algorithms available:
# ClairvoyantCVT
# CentralizedCVT
# DecentralizedCVT
# NearOptimalCVT (not deterministic and not tested)
-from coverage_control.algorithms import ClairvoyantCVT
-from coverage_control.algorithms import CentralizedCVT
-from coverage_control.algorithms import DecentralizedCVT
+from coverage_control.algorithms import CentralizedCVT, ClairvoyantCVT, DecentralizedCVT
params = cc.Parameters()
params.pNumFeatures = 3
@@ -44,63 +42,79 @@
robot_pos_list.append(cc.Point2(139, 319))
robot_pos_list.append(cc.Point2(92, 112))
+
def test_clairvoyant_cvt():
env = cc.CoverageSystem(params, bnd_list, robot_pos_list)
init_cost = env.GetObjectiveValue()
controller = ClairvoyantCVT(params, env)
+
for i in range(0, params.pEpisodeSteps):
- controller.ComputeActions();
+ controller.ComputeActions()
actions = controller.GetActions()
+
if env.StepActions(actions):
print("Error in step " + str(i))
+
break
+
if controller.IsConverged():
break
step_num = i
current_cost = env.GetObjectiveValue()
- improvement = 100 * (init_cost - current_cost)/init_cost
+ improvement = 100 * (init_cost - current_cost) / init_cost
improvement_rounded = round(improvement, 2)
assert improvement_rounded == 98.90
assert step_num == 166
+
def test_centralized_cvt():
env = cc.CoverageSystem(params, bnd_list, robot_pos_list)
init_cost = env.GetObjectiveValue()
controller = CentralizedCVT(params, env)
+
for i in range(0, params.pEpisodeSteps):
- controller.ComputeActions();
+ controller.ComputeActions()
actions = controller.GetActions()
+
if env.StepActions(actions):
print("Error in step " + str(i))
+
break
+
if controller.IsConverged():
break
step_num = i
current_cost = env.GetObjectiveValue()
- improvement = 100 * (init_cost - current_cost)/init_cost
+ improvement = 100 * (init_cost - current_cost) / init_cost
improvement_rounded = round(improvement, 2)
assert improvement_rounded == 58.13
assert step_num == 104
+
def test_decentralized_cvt():
env = cc.CoverageSystem(params, bnd_list, robot_pos_list)
init_cost = env.GetObjectiveValue()
controller = DecentralizedCVT(params, env)
+
for i in range(0, params.pEpisodeSteps):
- controller.ComputeActions();
+ controller.ComputeActions()
actions = controller.GetActions()
+
if env.StepActions(actions):
print("Error in step " + str(i))
+
break
+
if controller.IsConverged():
break
step_num = i
current_cost = env.GetObjectiveValue()
- improvement = 100 * (init_cost - current_cost)/init_cost
+ improvement = 100 * (init_cost - current_cost) / init_cost
improvement_rounded = round(improvement, 2)
assert improvement_rounded == 8.87
assert step_num == 17
+
if __name__ == "__main__":
test_clairvoyant_cvt()
test_centralized_cvt()
diff --git a/python/tests/test_coverage_env_utils.py b/python/tests/test_coverage_env_utils.py
index f3497578..54171861 100644
--- a/python/tests/test_coverage_env_utils.py
+++ b/python/tests/test_coverage_env_utils.py
@@ -20,19 +20,17 @@
# CoverageControl library. If not, see .
import os
-import sys
-import tempfile
-import numpy as np
import warnings
-import torch
-import torch_geometric
import coverage_control
+import numpy as np
+import torch
+import torch_geometric
from coverage_control.nn import CoverageEnvUtils
script_dir = os.path.dirname(os.path.realpath(__file__))
-params_file = os.path.join(script_dir, "data/coverage_control_params.toml")
+params_file = os.path.join(script_dir, "data/params/coverage_control_params.toml")
params = coverage_control.Parameters(params_file)
features_file = os.path.join(script_dir, "data/features")
@@ -50,6 +48,7 @@
params.pNumRobots = env.GetNumRobots()
+
def test_to_tensor():
rand_np = np.random.rand(10, 10).astype(np.float32)
rand_torch = CoverageEnvUtils.to_tensor(rand_np)
@@ -59,11 +58,14 @@ def test_to_tensor():
is_all_close = np.allclose(rand_np, rand_torch.numpy())
assert is_all_close
is_all_equal = np.equal(rand_np, rand_torch.numpy()).all()
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_to_tensor_device():
rand_np = np.random.rand(10, 10).astype(np.float32)
+
if torch.cuda.is_available():
rand_torch = CoverageEnvUtils.to_tensor(rand_np).to("cuda")
assert isinstance(rand_torch, torch.Tensor)
@@ -72,63 +74,89 @@ def test_to_tensor_device():
is_all_close = np.allclose(rand_np, rand_torch.cpu().numpy())
assert is_all_close
is_all_equal = np.equal(rand_np, rand_torch.cpu().numpy()).all()
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
else:
warnings.warn("CUDA not available, skipping test_to_tensor_device")
+
def test_get_raw_local_maps():
local_maps = CoverageEnvUtils.get_raw_local_maps(env, params)
assert isinstance(local_maps, torch.Tensor)
- assert local_maps.shape == (params.pNumRobots, params.pLocalMapSize, params.pLocalMapSize)
+ assert local_maps.shape == (
+ params.pNumRobots,
+ params.pLocalMapSize,
+ params.pLocalMapSize,
+ )
assert local_maps.dtype == torch.float32
- saved_local_maps = torch.load(os.path.join(script_dir, "data/coverage_env_utils/local_maps.pt"))
+ saved_local_maps = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/local_maps.pt")
+ )
is_all_close = torch.allclose(local_maps, saved_local_maps)
assert is_all_close
is_all_equal = torch.equal(local_maps, saved_local_maps)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_raw_obstacle_maps():
obstacle_maps = CoverageEnvUtils.get_raw_obstacle_maps(env, params)
assert isinstance(obstacle_maps, torch.Tensor)
- assert obstacle_maps.shape == (params.pNumRobots, params.pLocalMapSize, params.pLocalMapSize)
+ assert obstacle_maps.shape == (
+ params.pNumRobots,
+ params.pLocalMapSize,
+ params.pLocalMapSize,
+ )
assert obstacle_maps.dtype == torch.float32
- saved_obstacle_maps = torch.load(os.path.join(script_dir, "data/coverage_env_utils/obstacle_maps.pt"))
+ saved_obstacle_maps = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/obstacle_maps.pt")
+ )
is_all_close = torch.allclose(obstacle_maps, saved_obstacle_maps)
assert is_all_close
is_all_equal = torch.equal(obstacle_maps, saved_obstacle_maps)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_communication_maps():
comm_maps = CoverageEnvUtils.get_communication_maps(env, params, 32)
assert isinstance(comm_maps, torch.Tensor)
assert comm_maps.shape == (params.pNumRobots, 2, 32, 32)
assert comm_maps.dtype == torch.float32
- saved_comm_maps = torch.load(os.path.join(script_dir, "data/coverage_env_utils/comm_maps.pt"))
+ saved_comm_maps = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/comm_maps.pt")
+ )
is_all_close = torch.allclose(comm_maps, saved_comm_maps)
assert is_all_close
is_all_equal = torch.equal(comm_maps, saved_comm_maps)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_resize_maps():
local_maps = CoverageEnvUtils.get_raw_local_maps(env, params)
resized_local_maps = CoverageEnvUtils.resize_maps(local_maps, 32)
assert isinstance(resized_local_maps, torch.Tensor)
assert resized_local_maps.shape == (params.pNumRobots, 32, 32)
assert resized_local_maps.dtype == torch.float32
- saved_resized_local_maps = torch.load(os.path.join(script_dir, "data/coverage_env_utils/resized_local_maps.pt"))
+ saved_resized_local_maps = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/resized_local_maps.pt")
+ )
is_all_close = torch.allclose(resized_local_maps, saved_resized_local_maps)
assert is_all_close
is_all_equal = torch.equal(resized_local_maps, saved_resized_local_maps)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_maps():
- maps = CoverageEnvUtils.get_maps(env, params, 32, use_comm_map = True)
+ maps = CoverageEnvUtils.get_maps(env, params, 32, use_comm_map=True)
assert isinstance(maps, torch.Tensor)
assert maps.shape == (params.pNumRobots, 4, 32, 32)
assert maps.dtype == torch.float32
@@ -136,57 +164,76 @@ def test_get_maps():
is_all_close = torch.allclose(maps, saved_maps)
assert is_all_close
is_all_equal = torch.equal(maps, saved_maps)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_voronoi_features():
voronoi_features = CoverageEnvUtils.get_voronoi_features(env)
assert isinstance(voronoi_features, torch.Tensor)
feature_len = len(env.GetRobotVoronoiFeatures()[0])
assert voronoi_features.shape == (params.pNumRobots, feature_len)
assert voronoi_features.dtype == torch.float32
- saved_voronoi_features = torch.load(os.path.join(script_dir, "data/coverage_env_utils/voronoi_features.pt"))
+ saved_voronoi_features = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/voronoi_features.pt")
+ )
is_all_close = torch.allclose(voronoi_features, saved_voronoi_features)
assert is_all_close
is_all_equal = torch.equal(voronoi_features, saved_voronoi_features)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_robot_positions():
robot_positions = CoverageEnvUtils.get_robot_positions(env)
assert isinstance(robot_positions, torch.Tensor)
assert robot_positions.shape == (params.pNumRobots, 2)
assert robot_positions.dtype == torch.float32
- saved_robot_positions = torch.load(os.path.join(script_dir, "data/coverage_env_utils/robot_positions.pt"))
+ saved_robot_positions = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/robot_positions.pt")
+ )
is_all_close = torch.allclose(robot_positions, saved_robot_positions)
assert is_all_close
is_all_equal = torch.equal(robot_positions, saved_robot_positions)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_weights():
weights = CoverageEnvUtils.get_weights(env, params)
assert isinstance(weights, torch.Tensor)
assert weights.shape == (params.pNumRobots, params.pNumRobots)
assert weights.dtype == torch.float32
- saved_weights = torch.load(os.path.join(script_dir, "data/coverage_env_utils/weights.pt"))
+ saved_weights = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/weights.pt")
+ )
is_all_close = torch.allclose(weights, saved_weights)
assert is_all_close
is_all_equal = torch.equal(weights, saved_weights)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
+
def test_get_torch_geometric_data():
- data = CoverageEnvUtils.get_torch_geometric_data(env, params, use_cnn = True, use_comm_map = True, map_size = 32)
+ data = CoverageEnvUtils.get_torch_geometric_data(
+ env, params, use_cnn=True, use_comm_map=True, map_size=32
+ )
assert isinstance(data, torch_geometric.data.Data)
assert data.x.shape == (params.pNumRobots, 4, 32, 32)
assert data.x.dtype == torch.float32
assert data.edge_index.shape == (2, 16)
assert data.edge_index.dtype == torch.long
- saved_data = torch.load(os.path.join(script_dir, "data/coverage_env_utils/torch_geometric_data.pt"))
+ saved_data = torch.load(
+ os.path.join(script_dir, "data/coverage_env_utils/torch_geometric_data.pt")
+ )
is_all_close = torch.allclose(data.x, saved_data.x)
assert is_all_close
is_all_equal = torch.equal(data.x, saved_data.x)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
# assert data.x.equal(saved_data.x)
@@ -194,6 +241,7 @@ def test_get_torch_geometric_data():
is_all_close = torch.allclose(data.edge_index, saved_data.edge_index)
assert is_all_close
is_all_equal = torch.equal(data.edge_index, saved_data.edge_index)
+
if not is_all_equal and is_all_close:
warnings.warn("Not all elements are equal, but all elements are close")
# assert data.edge_index.equal(saved_data.edge_index)
diff --git a/python/tests/test_map_generation.py b/python/tests/test_map_generation.py
index c21f1bb3..e2cb1715 100644
--- a/python/tests/test_map_generation.py
+++ b/python/tests/test_map_generation.py
@@ -20,23 +20,24 @@
# CoverageControl library. If not, see .
import os
-import sys
-import tempfile
import warnings
-import numpy as np
import coverage_control
+import numpy as np
script_dir = os.path.dirname(os.path.realpath(__file__))
-params_file = os.path.join(script_dir, "data/coverage_control_params.toml")
+params_file = os.path.join(script_dir, "data/params/coverage_control_params.toml")
params = coverage_control.Parameters(params_file)
features_file = os.path.join(script_dir, "data/features")
robot_pos_file = os.path.join(script_dir, "data/robots_positions")
+
def test_map_generation():
- world_idf = coverage_control.WorldIDF(params, os.path.join(script_dir, features_file))
+ world_idf = coverage_control.WorldIDF(
+ params, os.path.join(script_dir, features_file)
+ )
env = coverage_control.CoverageSystem(params, world_idf, robot_pos_file)
world_map = env.GetWorldMap()
@@ -44,9 +45,11 @@ def test_map_generation():
is_all_close = np.allclose(world_map, world_map_ref, atol=1e-2)
assert is_all_close
is_all_equal = np.equal(world_map, world_map_ref).all()
+
if not is_all_equal and is_all_close:
print("Max error: ", np.max(np.abs(world_map - world_map_ref)))
warnings.warn("Not all elements are equal, but all elements are close")
+
if __name__ == "__main__":
test_map_generation()
diff --git a/python/tests/test_models.py b/python/tests/test_models.py
index 44f37642..ccbdec8d 100644
--- a/python/tests/test_models.py
+++ b/python/tests/test_models.py
@@ -20,71 +20,83 @@
# CoverageControl library. If not, see .
import os
-import sys
-import tempfile
-import numpy as np
import warnings
-import torch
-import torch_geometric
-import coverage_control as cc
import coverage_control.nn as cc_nn
+import torch
+import torch_geometric
from coverage_control import IOUtils
-from coverage_control.nn import CoverageEnvUtils
script_dir = os.path.dirname(os.path.realpath(__file__))
-device = torch.device('cpu')
+device = torch.device("cpu")
with torch.no_grad():
- model_file = os.path.join(script_dir, "data/lpac/models/model_k3_1024_state_dict.pt")
- learning_config_file = os.path.join(script_dir, "data/lpac/models/learning_params.toml")
+ model_file = os.path.join(
+ script_dir, "data/lpac/models/model_k3_1024_state_dict.pt"
+ )
+ learning_config_file = os.path.join(
+ script_dir, "data/params/learning_params.toml"
+ )
learning_config = IOUtils.load_toml(learning_config_file)
lpac_model = cc_nn.LPAC(learning_config).to(device)
lpac_model.load_state_dict(torch.load(model_file))
lpac_model.eval()
- use_comm_maps = learning_config['ModelConfig']['UseCommMaps']
- map_size = learning_config['CNNBackBone']['ImageSize']
+ use_comm_maps = learning_config["ModelConfig"]["UseCommMaps"]
+ map_size = learning_config["CNNBackBone"]["ImageSize"]
+
+ lpac_inputs = torch.load(os.path.join(script_dir, "data/lpac/lpac_inputs.pt"))
- lpac_inputs = torch.load(os.path.join(script_dir, "data/nn/lpac_inputs.pt"))
def test_cnn():
with torch.no_grad():
- ref_cnn_outputs = torch.load(os.path.join(script_dir, "data/nn/cnn_outputs.pt"))
+ ref_cnn_outputs = torch.load(os.path.join(script_dir, "data/lpac/cnn_outputs.pt"))
cnn_model = lpac_model.cnn_backbone.to(device).eval()
+
for i in range(0, len(lpac_inputs)):
cnn_output = cnn_model(lpac_inputs[i].x)
is_close = torch.allclose(cnn_output, ref_cnn_outputs[i], atol=1e-4)
+
if not is_close:
error = torch.sum(torch.abs(cnn_output - ref_cnn_outputs[i]))
print(f"Error: {error} at {i}")
assert is_close
+
break
is_equal = torch.equal(cnn_output, ref_cnn_outputs[i])
+
if not is_equal and is_close:
error = torch.sum(torch.abs(cnn_output - ref_cnn_outputs[i]))
print(f"Error: {error} at {i}")
warnings.warn("Outputs are close but not equal")
+
def test_lpac():
with torch.no_grad():
- ref_lpac_outputs = torch.load(os.path.join(script_dir, "data/nn/lpac_outputs.pt"))
+ ref_lpac_outputs = torch.load(
+ os.path.join(script_dir, "data/lpac/lpac_outputs.pt")
+ )
+
for i in range(0, len(lpac_inputs)):
lpac_output = lpac_model(lpac_inputs[i])
is_close = torch.allclose(lpac_output, ref_lpac_outputs[i], atol=1e-4)
+
if not is_close:
error = torch.sum(torch.abs(lpac_output - ref_lpac_outputs[i]))
print(f"Error: {error} at {i}")
assert is_close
+
break
is_equal = torch.equal(lpac_output, ref_lpac_outputs[i])
+
if not is_equal and is_close:
error = torch.sum(torch.abs(lpac_output - ref_lpac_outputs[i]))
print(f"Error: {error} at {i}")
warnings.warn("Outputs are close but not equal")
+
if __name__ == "__main__":
test_cnn()
test_lpac()
diff --git a/python/tests/test_package.py b/python/tests/test_package.py
index 90f31393..de355974 100644
--- a/python/tests/test_package.py
+++ b/python/tests/test_package.py
@@ -20,6 +20,7 @@
# CoverageControl library. If not, see .
import importlib.metadata
+
import coverage_control as m
diff --git a/python/tests/test_parameters.py b/python/tests/test_parameters.py
index d461535e..1b400ee4 100644
--- a/python/tests/test_parameters.py
+++ b/python/tests/test_parameters.py
@@ -19,16 +19,21 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
+import os
import sys
+
if sys.version_info[1] < 11:
import tomli as tomllib
else:
import tomllib
+
import coverage_control as cc
+
def test_parameters():
# Test the parameters
- params_file = "params/coverage_control_params.toml"
+ script_dir = os.path.dirname(os.path.realpath(__file__))
+ params_file = os.path.join(script_dir, "data/params/coverage_control_params.toml")
params = cc.Parameters(params_file)
with open(params_file, "rb") as f:
@@ -45,11 +50,13 @@ def test_parameters():
# Extract all the leaf nodes from the toml file
def extract_leaf_nodes(params_toml):
leaf_nodes = []
+
for key in params_toml:
if isinstance(params_toml[key], dict):
leaf_nodes.extend(extract_leaf_nodes(params_toml[key]))
else:
leaf_nodes.append(key)
+
return leaf_nodes
params_toml = extract_leaf_nodes(params_toml)
@@ -61,5 +68,6 @@ def extract_leaf_nodes(params_toml):
for key in dir_params:
assert key in params_toml
+
if __name__ == "__main__":
test_parameters()
diff --git a/python/tests/test_parity.py b/python/tests/test_parity.py
index a248459f..65252422 100644
--- a/python/tests/test_parity.py
+++ b/python/tests/test_parity.py
@@ -19,19 +19,18 @@
# You should have received a copy of the GNU General Public License along with
# CoverageControl library. If not, see .
-import os
-import sys
-import tempfile
import warnings
-import numpy as np
import coverage_control
+import numpy as np
params = coverage_control.Parameters()
-
+
+
def test_parity():
if not coverage_control.CudaUtils.IsCudaAvailable():
warnings.warn("CUDA not available, skipping test")
+
return
env_cuda = coverage_control.CoverageSystem(params)
world_idf_obj_cuda = env_cuda.GetWorldIDFObject()
@@ -45,15 +44,18 @@ def test_parity():
world_map_cpu = world_idf_obj_cpu.GetWorldMap()
is_close = np.allclose(world_map_cuda, world_map_cpu, atol=1e-2)
+
if not is_close:
diff = np.abs(world_map_cuda - world_map_cpu).max()
print("Max difference: ", diff)
assert is_close
is_equal = np.array_equal(world_map_cuda, world_map_cpu)
+
if not is_equal and is_close:
diff = np.abs(world_map_cuda - world_map_cpu).max()
print("Max difference: ", diff)
warnings.warn("Not all elements are equal, but all elements are close")
+
if __name__ == "__main__":
test_parity()