Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added functionality: linear transformation and scaling of DLC data. #24

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CODE_OF_CONDUCT.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ further defined and clarified by project maintainers.
## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at alexander.mathis@bethgelab.org. All
reported by contacting the project team at alexander.mathis@epfl.ch. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Expand Down
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,16 @@ code: https://github.com/PolarBean/DLC_ROI_tool

Contributed by [Harry Carey](https://github.com/PolarBean)

## Linear Transformation and Scaling of DLC output data (transform_and_scale)

This package is designed for anyone who wants to know where a tracked marker is within a reference frame (i.e. behavioral context). DeepLabCut outputs coordinates in relation to the field of view of the recorded video. With this tool, these coordinates can be linearly transformed and scaled to the reference frame of the behavioral context, meaning that the output coordinates are distances [cm] to a corner of the behavioral context, instead of distances [px] to a corner of the video field of view.

code: https://github.com/DeepLabCut/DLCutils/tree/master/transform_and_scale/

tutorial: https://github.com/DeepLabCut/DLCutils/tree/master/transform_and_scale/transform_and_scale_tutorial.ipynb

Contributed by [Michael Schellenberger](https://github.com/MSchellenberger)

# Clustering tools (using the output of DLC):

## Identifying Behavioral Structure from Deep Variational Embeddings of Animal Motion
Expand Down
208 changes: 208 additions & 0 deletions transform_and_scale/DLCTransformer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,208 @@
import pandas as pd
import numpy as np
from read_config import read_config
from typing import Optional

class DLCTransformer:
    """Linear transformation and scaling of DeepLabCut (DLC) tracking data.

    Three immobile reference markers — an origin and the endpoints of a
    horizontal and a vertical basis vector (e.g. corners of a rectangular
    maze) — define a new coordinate system. All marker coordinates are
    expressed in that system and then scaled (e.g. from pixels to cm).
    """

    def __init__(
        self,
        config_filepath: str,
        dlc_filepath: Optional[str] = None,
        dlc_df: Optional[pd.DataFrame] = None,
    ):
        """Load the config and the tracking data.

        :param config_filepath: path to the YAML configuration file
        :param dlc_filepath: path to a DLC output file (.h5 or .csv);
            exactly one of dlc_filepath / dlc_df must be provided
        :param dlc_df: pre-loaded DLC DataFrame with the usual
            (scorer, bodypart, coord) column MultiIndex
        :raises ValueError: on an unsupported file extension, or when
            neither data source is given
        """
        self.config = read_config(config_filepath)
        self.dlc_filepath = dlc_filepath
        if self.dlc_filepath is not None:
            if self.dlc_filepath.endswith("h5"):
                # pd.read_hdf restores the stored MultiIndex columns by
                # itself; `header`/`index_col` are read_csv-only options and
                # are rejected by the HDF reader (TypeError), so they must
                # not be passed here.
                self.dlc_df = pd.read_hdf(dlc_filepath)
            elif self.dlc_filepath.endswith("csv"):
                self.dlc_df = pd.read_csv(dlc_filepath, header=[0, 1, 2], index_col=0)
            else:
                raise ValueError("DeepLabCut file must be .h5 or .csv")
        elif dlc_df is not None:
            self.dlc_df = dlc_df
        else:
            raise ValueError("One of the arguments dlc_filepath or dlc_df must be specified")

        # read metadata: reference-frame definition from the config
        self.origin_marker = self.config["origin_marker"]
        self.basis_vector_h_marker = self.config["basis_vector_h_marker"]
        self.basis_vector_v_marker = self.config["basis_vector_v_marker"]
        self.scale_factor_h = self.config["scale_factor_h"]
        self.scale_factor_v = self.config["scale_factor_v"]
        self.dlc_immobile_marker_threshold = self.config[
            "dlc_immobile_marker_threshold"
        ]
        self.show_angle = self.config["show_angle"]
        # DLC writes a single scorer name as the top column level
        self.scorer = self.dlc_df.columns.get_level_values(0).unique()[0]

    def run(self) -> pd.DataFrame:
        """
        Translate and scale DLC data.
        First get median coordinate of origin marker and basis vectors, then transform and scale the data.

        :return: pd.DataFrame with transformed and scaled DLC data
        """
        # get basis vectors and origin
        origin, basis_vector_h, basis_vector_v = self.get_basis_vectors(
            df=self.dlc_df,
            origin_marker=self.origin_marker,
            basis_vector_h_marker=self.basis_vector_h_marker,
            basis_vector_v_marker=self.basis_vector_v_marker,
            dlc_immobile_marker_threshold=self.dlc_immobile_marker_threshold,
        )

        if self.show_angle:
            # quality control: for a rectangular reference frame this
            # should be close to 90 degrees
            print(
                "Angle between basis vectors :",
                self._calculate_angle(basis_vector_v, (0, 0), basis_vector_h),
            )

        # transform df
        df_transformed = self.transform(
            self.dlc_df, origin, basis_vector_h, basis_vector_v
        )
        df_scaled = self.scale_df(
            df_transformed, self.scale_factor_h, self.scale_factor_v
        )
        return df_scaled

    def get_basis_vectors(
        self,
        df: pd.DataFrame,
        origin_marker: str,
        basis_vector_h_marker: str,
        basis_vector_v_marker: str,
        dlc_immobile_marker_threshold: float,
    ) -> tuple:
        """
        Returns the origin and basis vectors of the coordinate system.

        :param df: pd.DataFrame with tracking data
        :param origin_marker: name of origin marker
        :param basis_vector_h_marker: name of horizontal basis vector marker
        :param basis_vector_v_marker: name of vertical basis vector marker
        :param dlc_immobile_marker_threshold: minimum likelihood of a constant marker to be included into the median coordinate calculation
        :return: coordinates of origin, basis_vector_h, basis_vector_v
        """
        origin = self.get_median_coordinate(
            df, origin_marker, dlc_immobile_marker_threshold
        )

        # median positions of the two basis-vector markers (video frame)
        basis_vector_h_coord = self.get_median_coordinate(
            df=df,
            marker=basis_vector_h_marker,
            dlc_immobile_marker_threshold=dlc_immobile_marker_threshold,
        )
        basis_vector_v_coord = self.get_median_coordinate(
            df=df,
            marker=basis_vector_v_marker,
            dlc_immobile_marker_threshold=dlc_immobile_marker_threshold,
        )

        # basis vectors relative to the origin
        basis_vector_h = (
            basis_vector_h_coord[0] - origin[0],
            basis_vector_h_coord[1] - origin[1],
        )
        basis_vector_v = (
            basis_vector_v_coord[0] - origin[0],
            basis_vector_v_coord[1] - origin[1],
        )

        return origin, basis_vector_h, basis_vector_v

    def get_median_coordinate(self, df, marker, dlc_immobile_marker_threshold) -> tuple:
        """
        Returns the most likely coordinate of a marker.

        :param df: pd.DataFrame with tracking data
        :param marker: name of marker of which the median coordinate should be calculated
        :param dlc_immobile_marker_threshold: minimum likelihood of a constant marker to be included into the median coordinate calculation
        :return: (x, y) median coordinate of marker
        """
        # drop the scorer level so columns are (bodypart, coord), then keep
        # only the frames where DLC is confident about this marker
        df = df.droplevel(0, axis=1)
        marker_df = df.loc[
            df[marker, "likelihood"] > dlc_immobile_marker_threshold, marker
        ].copy()
        x_coord = np.nanmedian(marker_df["x"])
        y_coord = np.nanmedian(marker_df["y"])
        coords = (x_coord, y_coord)
        return coords

    def _calculate_angle(self, a, b, c) -> float:
        """
        Calculates the angle at vertex b between the three points a-b-c.

        The angle is measured between the rays b->a and b->c. (Using the
        vector a->b instead of b->a — as a previous version did — yields the
        supplement of the vertex angle for anything other than 90 degrees.)

        :param a: tuple of coordinates
        :param b: tuple of coordinates (the vertex)
        :param c: tuple of coordinates
        :return: angle between a-b-c in degrees
        """
        # vectors from the vertex b towards a and c
        vector_BA = (a[0] - b[0], a[1] - b[1])
        vector_BC = (c[0] - b[0], c[1] - b[1])

        magnitude_BA = np.linalg.norm(vector_BA)
        magnitude_BC = np.linalg.norm(vector_BC)

        dot_product = np.dot(vector_BA, vector_BC)

        # clip guards against floating-point drift just outside [-1, 1]
        cos_angle = np.clip(dot_product / (magnitude_BA * magnitude_BC), -1.0, 1.0)
        angle_radians = np.arccos(cos_angle)

        angle_degrees = np.degrees(angle_radians)
        return float(angle_degrees)

    def transform(self, df, origin, basis_vector_h, basis_vector_v) -> pd.DataFrame:
        """
        Transform the coordinates of the df into the new coordinate system.

        Solves p - origin = x' * basis_vector_h + y' * basis_vector_v for
        (x', y') via Cramer's rule. This is algebraically identical to the
        previous chained-division form but also works when a basis vector is
        axis-aligned (a zero component no longer causes division by zero).

        :param df: pd.DataFrame with tracking data
        :param origin: coordinate of origin
        :param basis_vector_h: coordinate of horizontal basis vector
        :param basis_vector_v: coordinate of vertical basis vector
        :return: transformed pd.DataFrame
        :raises ValueError: if the basis vectors are collinear
        """
        transformed_df = df.copy()

        # columns of the 2x2 basis matrix [[a, c], [b, d]]
        a, b = basis_vector_h
        c, d = basis_vector_v
        det = a * d - b * c
        if det == 0:
            raise ValueError("Basis vectors are collinear; cannot transform coordinates")

        for marker in df.columns.get_level_values(1).unique():
            # shift coordinates into origin system
            x = df.loc[:, (self.scorer, marker, "x")] - origin[0]
            y = df.loc[:, (self.scorer, marker, "y")] - origin[1]

            # invert the basis matrix (Cramer's rule)
            transformed_df.loc[:, (self.scorer, marker, "x")] = (d * x - c * y) / det
            transformed_df.loc[:, (self.scorer, marker, "y")] = (a * y - b * x) / det

        return transformed_df

    def scale_df(self, df, scale_factor_h, scale_factor_v) -> pd.DataFrame:
        """
        Scale the x and y coordinates of every marker (likelihood untouched).

        :param df: pd.DataFrame with tracking data
        :param scale_factor_h: horizontal scale factor
        :param scale_factor_v: vertical scale factor
        :return: scaled pd.DataFrame
        """
        scaled_df = df.copy()
        for marker in df.columns.get_level_values(1).unique():
            scaled_df.loc[:, (self.scorer, marker, "x")] *= scale_factor_h
            scaled_df.loc[:, (self.scorer, marker, "y")] *= scale_factor_v
        return scaled_df
28 changes: 28 additions & 0 deletions transform_and_scale/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Easy transformation and scaling of DeepLabCut Data

#### Background
DeepLabCut is a widely used markerless pose estimation toolbox in behavioral science. The output of DeepLabCut is coordinates in pixels for each frame for each marker. However, in most cases the coordinates first need to be translated (i.e. adapted to the coordinate-space of the behavioral maze) and scaled (e.g. to cm) to enable meaningful behavioral quantification.

#### Functionality
The Transform_DLC repository takes care of a specific task: It takes DeepLabCut dataframes as the input and outputs transformed and scaled DeepLabCut dataframes. Requirements for this transformation and scaling are that the behavioral maze or a reference has to be rectangular and its corners have to be tracked with DeepLabCut.

#### Usage
For an example use case, check out the tutorial notebook!

1) Set the hyperparameters in the config file: Specify the names of the horizontal and vertical basis vector markers (in a rectangular maze these are two opposing corners) and the origin (the corner that connects the two basis vector corners). Additionally, set the DeepLabCut-likelihood threshold for those markers, and the scale factors (optional; if you don't need scaling, set them to 1). For quality control, set show_angle to True.

2) Instantiate the DLCTransformer class with the filepath to your DeepLabCut tracked data and the config-filepath.

3) Use .run on your instantiated class object and save the output in a variable of your choice.


#### Contribution
This is a [Defense Circuits Lab](https://www.defense-circuits-lab.com/) project written by [Michael Schellenberger](https://github.com/MSchellenberger) for [DLCutils](https://github.com/DeepLabCut/DLCutils).
<table>
<td>
<a href="https://www.defense-circuits-lab.com/">
<img src="https://static.wixstatic.com/media/547baf_87ffe507a5004e29925dbeb65fe110bb~mv2.png/v1/fill/w_406,h_246,al_c,q_85,usm_0.66_1.00_0.01,enc_auto/LabLogo3black.png" alt="DefenseCircuitsLab" style="width: 250px;"/>
</a>
</td>

</table>
Binary file not shown.
Binary file not shown.
7 changes: 7 additions & 0 deletions transform_and_scale/config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
origin_marker: 'RodLowerLeft'
basis_vector_h_marker: 'RodLowerRight'
basis_vector_v_marker: 'RodUpperLeft'
scale_factor_h: 5.8
scale_factor_v: 3
dlc_immobile_marker_threshold: 0.9
show_angle: True
21 changes: 21 additions & 0 deletions transform_and_scale/read_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import yaml


def read_config(config_path: str) -> dict:
    """
    Reads structured config file defining a project.

    :param config_path: path to config file
    :return: parsed configuration as a dict
    :raises FileNotFoundError: if no file exists at config_path
    """

    try:
        with open(config_path, "r") as ymlfile:
            config_file = yaml.load(ymlfile, Loader=yaml.SafeLoader)
    except FileNotFoundError as err:
        # `raise <str>` is itself a TypeError ("exceptions must derive from
        # BaseException"); raise a proper exception chained to the original.
        raise FileNotFoundError(
            "Could not find the config file at "
            + config_path
            + " \n Please make sure the path is correct and the file exists"
        ) from err

    return config_file
Binary file not shown.
63 changes: 63 additions & 0 deletions transform_and_scale/transform_and_scale_tutorial.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from DLCTransformer import DLCTransformer"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dlc_filepath = r\"test_data/DefenseCircuitsLab2023_Rotarod_testfile.h5\"\n",
"config_filepath = r\"config.yaml\"\n",
"transformer = DLCTransformer(dlc_filepath=dlc_filepath, config_filepath=config_filepath)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scaled = transformer.run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"scaled.head()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 1
}