Update classification + sorted predictions
c-h-benedetti committed Jan 13, 2025
1 parent 47f743c commit 2b351bc
Showing 13 changed files with 133 additions and 75 deletions.
2 changes: 1 addition & 1 deletion src/microglia_analyzer/__init__.py
@@ -1,4 +1,4 @@
__version__ = "0.2.0"
__version__ = "1.0.0"

import re

6 changes: 4 additions & 2 deletions src/microglia_analyzer/_tests/test_tiles.py
@@ -30,7 +30,8 @@
( 64, 32, ( 64, 128)),
( 64, 32, ( 128, 64)),
( 64, 32, ( 128, 128)),
(512, 128, (2048, 2048))
(512, 128, (2048, 2048)),
(512, 128, (1024, 1024))
]

# (Number of patches Y-axis, Number of patches X-axis)
@@ -43,7 +44,8 @@
(1, 3),
(3, 1),
(3, 3),
(5, 5)
(5, 5),
(3, 3)
]

# Blending between patches (for tiles_to_image)
6 changes: 3 additions & 3 deletions src/microglia_analyzer/_widget.py
@@ -136,7 +136,7 @@ def segment_microglia_panel(self):
h_layout.addWidget(self.minimal_area_label)
self.minimal_area_input = QSpinBox()
self.minimal_area_input.setRange(0, 1000000)
self.minimal_area_input.setValue(110)
self.minimal_area_input.setValue(40)
self.minimal_area_input.valueChanged.connect(self.min_area_update)
h_layout.addWidget(self.minimal_area_input)
layout.addLayout(h_layout)
@@ -330,8 +330,8 @@ def export_measures(self):
self.thread.start()

def run_batch(self):
self.total = len(self.get_all_tiff_files(self.sources_folder))
self.pbr = progress(total=self.total)
# self.total = len(self.get_all_tiff_files(self.sources_folder))
self.pbr = progress()
self.pbr.set_description("Running on folder...")
self.set_active_ui(False)
self.thread = QThread()
9 changes: 8 additions & 1 deletion src/microglia_analyzer/_widget_annotations_helper.py
@@ -7,6 +7,7 @@
import tifffile
import skimage

from skimage.morphology import skeletonize
import numpy as np
import os

@@ -28,6 +29,8 @@
"""
_MASKS_LAYER = "µ-glia-mask"

_SKELETON_LAYER = "µ-skeleton"

"""
Colors assigned to each YOLO class.
This array can be overridden if needed.
@@ -332,6 +335,10 @@ def save_masks(self):
if _MASKS_LAYER in self.viewer.layers:
tifffile.imwrite(mask_path, self.viewer.layers[_MASKS_LAYER].data)
show_info("Masks saved.")
if _SKELETON_LAYER in self.viewer.layers:
self.viewer.layers[_SKELETON_LAYER].data = skeletonize(self.viewer.layers[_MASKS_LAYER].data)
else:
layer = self.viewer.add_image(skeletonize(self.viewer.layers[_MASKS_LAYER].data), name=_SKELETON_LAYER, blending='additive')

# ----------------- METHODS -------------------------------------------

@@ -474,7 +481,7 @@ def set_root_directory(self, directory):
Args:
- directory (str): The absolute path to the root directory.
"""
folders = sorted([f for f in os.listdir(directory) if (not f.endswith('-labels')) and os.path.isdir(os.path.join(directory, f))])
folders = sorted([f for f in os.listdir(directory) if (not f.endswith('-labels')) and (not f.endswith('-masks')) and os.path.isdir(os.path.join(directory, f))])
folders = ["---"] + folders
self.inputs_name.clear()
self.inputs_name.addItems(folders)
10 changes: 9 additions & 1 deletion src/microglia_analyzer/dl/losses.py
@@ -41,6 +41,11 @@ def dice_loss(y_true, y_pred):
intersection = tf.reduce_sum(y_true * y_pred)
return 1 - (2. * intersection + 1) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + 1)

def dual_dice_loss(y_true, y_pred):
c1 = 0.3
c2 = 1.0 - c1
return c1 * dice_loss(y_true, y_pred) + c2 * dice_loss(1 - y_true, 1 - y_pred)

def bce_dice_loss(bce_coef=0.5):
def bcl(y_true, y_pred):
bce = tf.keras.losses.binary_crossentropy(y_true, y_pred)
@@ -53,4 +58,7 @@ def dice_skeleton_loss(skeleton_coef=0.5, bce_coef=0.5):
def _dice_skeleton_loss(y_true, y_pred):
y_pred = tf.square(y_pred)
return (1.0 - skeleton_coef) * bdl(y_true, y_pred) + skeleton_coef * skeleton_recall(y_true, y_pred)
return _dice_skeleton_loss
return _dice_skeleton_loss

def dsl(y_true, y_pred):
return dice_skeleton_loss(0.5, 0.5)(y_true, y_pred)
46 changes: 23 additions & 23 deletions src/microglia_analyzer/dl/unet2d_training.py
@@ -12,9 +12,7 @@
import pandas as pd
from tabulate import tabulate

# from microglia_analyzer.dl.losses import (dice_loss, bce_dice_loss,
# skeleton_recall, dice_skeleton_loss)
from losses import (dice_loss, bce_dice_loss, skeleton_recall, dice_skeleton_loss)
from losses import (dice_loss, bce_dice_loss, dual_dice_loss, dice_skeleton_loss)

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
@@ -79,12 +77,12 @@

## 📍 a. Data paths

data_folder = "/home/benedetti/Downloads/training-audrey"
data_folder = "/home/benedetti/Documents/projects/2060-microglia/data/training-data/experimental"
qc_folder = None
inputs_name = "inputs"
masks_name = "masks"
models_path = "/home/benedetti/Downloads/training-audrey/models"
working_directory = "/tmp/unet_working/"
inputs_name = "microglia"
masks_name = "microglia-masks"
models_path = "/home/benedetti/Documents/projects/2060-microglia/µnet"
working_directory = "/tmp/unet_working"
model_name_prefix = "unet"
reset_local_data = True
remove_wrong_data = True
@@ -95,23 +93,23 @@
validation_percentage = 0.15
batch_size = 8
epochs = 500
unet_depth = 2
unet_depth = 4
num_filters_start = 32
dropout_rate = 0.25
dropout_rate = 0.5
optimizer = 'Adam'
learning_rate = 0.001
skeleton_coef = 0.2
bce_coef = 0.5
bce_coef = 0.3
early_stop_patience = 50
dilation_kernel = diamond(1)
loss = dice_skeleton_loss(skeleton_coef, bce_coef)
loss = bce_dice_loss(bce_coef) # dice_skeleton_loss(skeleton_coef, bce_coef)

## 📍 c. Data augmentation

use_data_augmentation = True
use_mirroring = True
use_gaussian_noise = False
noise_scale = 0.001
use_gaussian_noise = True
noise_scale = 0.0005
use_random_rotations = True
angle_range = (-90, 90)
use_gamma_correction = True
@@ -457,7 +455,7 @@ def migrate_data(targets, source):

## 📍 a. Data augmentation functions

def deteriorate_image(image, mask, num_points=25):
def deteriorate_image(image, mask, num_points=5):
"""
Attempts to deteriorate the original image by making holes along the path.
"""
@@ -472,9 +470,13 @@ def deteriorate_image(image, mask, num_points=25):
new_image = np.full_like(mask, 0, dtype=np.uint8)
for point in selected_points:
new_image[point[0], point[1]] = 255
new_image = 1.0 - binary_dilation(new_image, footprint=dilation_kernel).astype(np.float32)
new_image = gaussian_filter(new_image, sigma=2.0)
dk = diamond(random.randint(3, 5))
new_image = 1.0 - binary_dilation(new_image, footprint=dk).astype(np.float32)
new_image = gaussian_filter(new_image, sigma=1.0+random.random())
new_image *= 0.5
new_image += 0.5
image *= new_image
# mask *= (1.0 - new_image)
return np.expand_dims(image, axis=-1), np.expand_dims(mask, axis=-1)

def random_flip(image, mask):
@@ -641,8 +643,8 @@ def open_pair(input_path, mask_path, training, img_only):
raw_img = tifffile.imread(input_path)
raw_img = np.expand_dims(raw_img, axis=-1)
raw_mask = tifffile.imread(mask_path)
# raw_mask = skeletonize(raw_mask)
# raw_mask = binary_dilation(raw_mask)
raw_mask = skeletonize(raw_mask)
raw_mask = binary_dilation(raw_mask)
raw_mask = raw_mask.astype(np.float32)
raw_mask /= np.max(raw_mask)
raw_mask = np.expand_dims(raw_mask, axis=-1)
@@ -823,12 +825,10 @@ def create_unet2d_model(input_shape):
x = Conv2DTranspose(num_filters, (3, 3), strides=(1, 1), padding='same')(x)
x = attention_block(skip_connections[i], x, intermediate_channels=8)
x = concatenate([x, skip_connections[i]])
x = Conv2D(num_filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)
# x = BatchNormalization()(x)
x = Conv2D(num_filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(x)
x = Conv2D(num_filters, 3, activation='sigmoid', padding='same', kernel_initializer='he_normal')(x)
x = Conv2D(num_filters, 3, activation='sigmoid', padding='same', kernel_initializer='he_normal')(x)
if i > 0:
x = BatchNormalization()(x)
# x = BatchNormalization()(x)

outputs = Conv2D(1, 1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=outputs)
13 changes: 7 additions & 6 deletions src/microglia_analyzer/dl/yolov5_training.py
@@ -4,6 +4,7 @@
import numpy as np
import re
from cv2 import imread
# from tifffile import imread
from yolov5 import train

"""
@@ -50,12 +51,12 @@

#@markdown ## 📍 a. Data paths

data_folder = "/home/benedetti/Documents/projects/2060-microglia/yolo-user-annotations/"
data_folder = "/home/benedetti/Documents/projects/2060-microglia/data/training-data/clean-v002"
qc_folder = None
inputs_name = "images"
annotations_name = "labels"
models_path = "/home/benedetti/Documents/projects/2060-microglia/yolo-models/"
working_directory = "/home/benedetti/Documents/projects/2060-microglia/yolo-runs/"
models_path = "/home/benedetti/Documents/projects/2060-microglia/µyolo"
working_directory = "/tmp/yolo-train"
model_name_prefix = "µyolo"
reset_local_data = True

@@ -68,10 +69,10 @@
optimizer = 'AdamW'
learning_rate = 0.0001
deterministic = True
cos_lr = True
cos_lr = False
label_smoothing = 0.0
overlap_mask = False
dropout = 0.2
overlap_mask = True
dropout = 0.5

# optimizer: 'SGD', 'Adam', 'AdamW'.
# deterministic: True, False
37 changes: 23 additions & 14 deletions src/microglia_analyzer/experimental/tiles.py
@@ -1,6 +1,6 @@
import numpy as np
from PIL import Image
from microglia_analyzer.tiles.tiler import ImageTiler2D
from microglia_analyzer.tiles.tiler import ImageTiler2D, normalize
import tifffile

def generate_checkerboard(width, height, num_squares_x, num_squares_y):
@@ -53,7 +53,7 @@ def generate_checkerboard(width, height, num_squares_x, num_squares_y):
img = Image.fromarray(checkerboard)
return img

if __name__ == "__main__":
if __name__ == "":
import os
import tifffile
import numpy as np
@@ -62,12 +62,13 @@ def generate_checkerboard(width, height, num_squares_x, num_squares_y):

shapes = [
(2048, 2048),
(1024, 1024)
# (1024, 1024)
]
for shape in shapes:
print("-----------")
image = np.ones(shape, dtype=np.float32)
tiles_manager = ImageTiler2D(512, 128, shape)
print("Grid: ", tiles_manager.grid_size)
for t in tiles_manager.layout:
print(t)
tiles = tiles_manager.image_to_tiles(image)
@@ -76,14 +77,22 @@ def generate_checkerboard(width, height, num_squares_x, num_squares_y):
tifffile.imwrite(os.path.join(output_path, f"{shape[0]}_{str(i).zfill(2)}.tif"), tiles_manager.blending_coefs[i])
tifffile.imwrite(os.path.join(output_path, f"{shape[0]}_merged.tif"), merged)

if __name__ == "":
# Generate a 2048x2048 image with 128x128 squares
checkerboard_img = np.squeeze(np.array(generate_checkerboard(2048, 2048, 16, 16)))
tifffile.imwrite("/tmp/original.tif", checkerboard_img)
tiles_manager = ImageTiler2D(512, 128, checkerboard_img.shape)
tiles = tiles_manager.image_to_tiles(checkerboard_img)
tifffile.imwrite("/tmp/checkerboard.tif", tiles)
merged = tiles_manager.tiles_to_image(tiles)
tifffile.imwrite("/tmp/merged.tif", merged)
tifffile.imwrite("/tmp/coefs.tif", tiles_manager.blending_coefs)
tifffile.imwrite("/tmp/gradient.tif", tiles_manager.tiles_to_image(tiles_manager.blending_coefs))
if __name__ == "__main__":
import os
import random
output_folder = "/tmp/dump/"
os.makedirs(output_folder, exist_ok=True)
for i in range(15):
sub_folder = os.path.join(output_folder, str(i).zfill(2))
os.makedirs(sub_folder, exist_ok=True)
Y = random.randint(512, 2048)
X = random.randint(512, 2048)
checkerboard_img = normalize(np.squeeze(np.array(generate_checkerboard(Y, X, 16, 16))))
tifffile.imwrite(os.path.join(sub_folder, "original.tif"), checkerboard_img)
tiles_manager = ImageTiler2D(512, 128, checkerboard_img.shape)
tiles = tiles_manager.image_to_tiles(checkerboard_img)
tifffile.imwrite(os.path.join(sub_folder, "checkerboard.tif"), tiles)
merged = tiles_manager.tiles_to_image(tiles)
tifffile.imwrite(os.path.join(sub_folder, "merged.tif"), merged)
tifffile.imwrite(os.path.join(sub_folder, "coefs.tif"), tiles_manager.blending_coefs)
tifffile.imwrite(os.path.join(sub_folder, "gradient.tif"), tiles_manager.tiles_to_image(tiles_manager.blending_coefs))