Commit b866396

Merge branch 'master' into feature/constrain-objects
AetherUnbound committed May 13, 2018
2 parents 8c8f085 + 3793871 commit b866396
Showing 15 changed files with 268 additions and 32 deletions.
3 changes: 2 additions & 1 deletion .travis.yml
@@ -3,6 +3,7 @@ addons:
packages:
- libhdf5-serial-dev
- python-pip
cache:
apt: true
directories: $HOME/.cache/pip
dist: trusty
@@ -21,4 +22,4 @@ notifications:
email: false
python: 2.7
script: pytest
sudo: false
sudo: false
6 changes: 6 additions & 0 deletions README.md
@@ -20,6 +20,12 @@ Please see help here: https://github.com/CellProfiler/CellProfiler/blob/master/c
pip install -r requirements.txt
```
To install CellProfiler-plugins on a Windows machine with support for the deep learning module ClassifyPixels-UNet, use
```
cd CellProfiler-plugins
pip install -r requirements-windows.txt
```
1. Configure the CellProfiler plugins directory in the GUI via `Preferences > CellProfiler plugins directory` (you will need to restart CellProfiler for the change to take effect). When running CellProfiler via the command line, use the `--plugins-directory` flag to specify the plugins directory, for example:
```
cellprofiler --run --run-headless --project PROJECT_FILE --plugins-directory PLUGIN_DIRECTORY/CellProfiler-plugins
```
66 changes: 47 additions & 19 deletions unet_segmentation.py → classifypixelsunet.py
@@ -1,14 +1,26 @@
# coding=utf-8

"""
Author: Tim Becker, Juan Caicedo, Claire McQuinn
"""

import logging
import os.path
import numpy
import pkg_resources
import requests
import sys
import time

import os.path
import cellprofiler.module
import cellprofiler.setting
import keras
import numpy
import pkg_resources
import requests
import tensorflow

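# Choose the Keras backend before keras is imported: CNTK on Windows, TensorFlow elsewhere.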
if sys.platform.startswith('win'):
os.environ["KERAS_BACKEND"] = "cntk"
else:
os.environ["KERAS_BACKEND"] = "tensorflow"

import keras

logger = logging.getLogger(__name__)

@@ -17,12 +29,30 @@


__doc__ = """\
tbd
ClassifyPixels-Unet performs pixel-wise classification using a UNet
network. The default network model is trained to identify nuclei, background,
and the nuclei boundary. Classification results are returned as three-channel
images:

* the red channel stores the background classification
* the green channel stores the nuclei classification
* the blue channel stores the boundary classification

In the simplest use case, the classifications are converted to grayscale images
using the module ColorToGray. The module IdentifyPrimaryObjects can then be
used to identify objects in the nuclei channel (green channel).

The default UNet model is downloaded and stored on the local machine. To
replace the model, the function download_file_from_google_drive needs to
be updated.

Authors: Tim Becker, Juan Caicedo, Claire McQuinn
"""

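As a rough, hypothetical sketch of the channel layout described in the docstring above (assuming the red, green, and blue channels map to array indices 0, 1, and 2), the three classifications can be separated with plain NumPy; the array below is a random stand-in for a real prediction, not output from the actual model:

```
import numpy

# Random stand-in for the (rows, cols, 3) array returned by unet_classify;
# in practice this would come from model.predict on a real input image.
pixel_classification = numpy.random.rand(128, 128, 3).astype(numpy.float32)

background = pixel_classification[:, :, 0]  # red channel: background classification
nuclei = pixel_classification[:, :, 1]      # green channel: nuclei classification
boundary = pixel_classification[:, :, 2]    # blue channel: boundary classification
```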

class UnetSegment(cellprofiler.module.ImageProcessing):
module_name = "UnetSegment"
class ClassifyPixelsUnet(cellprofiler.module.ImageProcessing):
module_name = "ClassifyPixels-Unet"
variable_revision_number = 1

def run(self, workspace):
@@ -35,15 +65,12 @@ def run(self, workspace):
t1 = time.time()
logger.debug('UNet initialization took {} seconds '.format(t1 - t0))

self.function = lambda input_image: unet_segmentation(model, input_image)
self.function = lambda input_image: unet_classify(model, input_image)

super(UnetSegment, self).run(workspace)
super(ClassifyPixelsUnet, self).run(workspace)


def unet_initialize(input_shape):
session = tensorflow.Session()
# apply session
keras.backend.set_session(session)
# create model

dim1, dim2 = input_shape
@@ -64,15 +91,16 @@ def unet_initialize(input_shape):

# Download the weights
logger.debug("Downloading model weights to: {:s}".format(weights_filename))
id = "146sae_bv2rJa4YoJr8Hlo-MFKfuepoMX"
download_file_from_google_drive(id, weights_filename)
model_id = "1I9j4oABbcV8EnvO_ufACXP9e4KyfHMtE"

download_file_from_google_drive(model_id, weights_filename)

model.load_weights(weights_filename)

return model


def unet_segmentation(model, input_image):
def unet_classify(model, input_image):
dim1, dim2 = input_image.shape

images = input_image.reshape((-1, dim1, dim2, 1))
@@ -82,11 +110,11 @@ def unet_segmentation(model, input_image):
images = images.astype(numpy.float32) / numpy.max(images)

start = time.time()
segmentation = model.predict(images, batch_size=1)
pixel_classification = model.predict(images, batch_size=1)
end = time.time()
logger.debug('UNet segmentation took {} seconds '.format(end - start))

return segmentation[0, :, :, :]
return pixel_classification[0, :, :, :]


def get_core(dim1, dim2):
197 changes: 197 additions & 0 deletions predict.py
@@ -0,0 +1,197 @@
import os
import subprocess
import tempfile

import h5py # HDF5 is ilastik's preferred file format
import logging
import skimage

import cellprofiler.image
import cellprofiler.module
import cellprofiler.setting

logger = logging.getLogger(__name__)

__doc__ = """\
Predict
=======

Use an ilastik pixel classifier to generate a probability image. Each
channel represents the probability that the pixels in the image belong to
a particular class. Use **ColorToGray** to separate channels for further
processing. For example, use **IdentifyPrimaryObjects** on a
(single-channel) probability map to generate a segmentation. The order
of the channels in **ColorToGray** is the same as the order of the
labels within the ilastik project.

CellProfiler automatically scales grayscale and color images to the
[0.0, 1.0] range on load. Your ilastik classifier should be trained on
images with the same scale as the prediction images. You can ensure
consistent scales by:

- using **ImageMath** to convert the images loaded by CellProfiler back
  to their original scale. Use these settings to rescale an image:

  - **Operation**: *None*
  - **Multiply the first image by**: *RESCALE_VALUE*
  - **Set values greater than 1 equal to 1?**: *No*

  where *RESCALE_VALUE* is determined by your image data and the value
  of *Set intensity range from* in **NamesAndTypes**. For example, the
  *RESCALE_VALUE* for 32-bit images rescaled by "*Image bit-depth*" is
  65535 (the maximum value allowed by this data type). Please refer to
  the help for the setting *Set intensity range from* in
  **NamesAndTypes** for more information.

  This option is best when your training and prediction images do not
  require any preprocessing by CellProfiler.

- preprocessing any training images with CellProfiler (e.g.,
  **RescaleIntensity**) and applying the same pre-processing steps to
  your analysis pipeline. You can use **SaveImages** to export training
  images as 32-bit TIFFs.

  This option requires two CellProfiler pipelines, but is effective
  when your training and prediction images require preprocessing by
  CellProfiler.

Additionally, please ensure CellProfiler is configured to load images in
the same format as ilastik. For example, if your ilastik classifier is
trained on RGB images, use **NamesAndTypes** to load images as RGB by
selecting "*Color image*" from the *Select the image type* dropdown. If
your classifier expects grayscale images, use **NamesAndTypes** to load
images as "*Grayscale image*".
"""

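A minimal sketch of the rescaling arithmetic described in the docstring above, assuming the example RESCALE_VALUE of 65535; this only illustrates what the ImageMath settings compute and is not code used by the module:

```
import numpy

RESCALE_VALUE = 65535.0  # example value from the text; depends on your image data

# CellProfiler loads images scaled to the [0.0, 1.0] range.
loaded = numpy.random.rand(64, 64).astype(numpy.float32)

# ImageMath with Operation "None", "Multiply the first image by" RESCALE_VALUE,
# and clipping above 1 disabled simply restores the original intensity scale.
original_scale = loaded * RESCALE_VALUE
```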

class Predict(cellprofiler.module.ImageProcessing):
module_name = "Predict"

variable_revision_number = 1

def create_settings(self):
super(Predict, self).create_settings()

self.executable = cellprofiler.setting.Pathname(
"Executable",
doc="ilastik command line executable name, or location if it is not on your path."
)

self.project_file = cellprofiler.setting.Pathname(
"Project file",
doc="Path to the project file (\*.ilp)."
)

self.project_type = cellprofiler.setting.Choice(
"Select the project type",
[
"Pixel Classification",
"Autocontext (2-stage)"
],
"Pixel Classification",
doc="""\
Select the project type which matches the project file specified by
*Project file*. CellProfiler supports two types of ilastik projects:

- *Pixel Classification*: Classify the pixels of an image given user
  annotations. `Read more`_.
- *Autocontext (2-stage)*: Perform pixel classification in multiple
  stages, sharing predictions between stages to improve results.
  `Read more <http://ilastik.org/documentation/autocontext/autocontext>`__.

.. _Read more: http://ilastik.org/documentation/pixelclassification/pixelclassification
"""
)

def settings(self):
settings = super(Predict, self).settings()

settings += [
self.executable,
self.project_file,
self.project_type
]

return settings

def visible_settings(self):
visible_settings = super(Predict, self).visible_settings()

visible_settings += [
self.executable,
self.project_file,
self.project_type
]

return visible_settings

def run(self, workspace):
image = workspace.image_set.get_image(self.x_name.value)

x_data = image.pixel_data

fin = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)

fout = tempfile.NamedTemporaryFile(suffix=".h5", delete=False)

cmd = [
self.executable.value,
"--headless",
"--project", self.project_file.value,
"--output_format", "hdf5"
]

if self.project_type.value in ["Pixel Classification"]:
cmd += ["--export_source", "Probabilities"]
elif self.project_type.value in ["Autocontext (2-stage)"]:
x_data = skimage.img_as_ubyte(x_data) # ilastik requires UINT8. Might be relaxed in future.

cmd += ["--export_source", "probabilities stage 2"]
#cmd += ["--export_source", "probabilities all stages"]

cmd += [
"--output_filename_format", fout.name,
fin.name
]

try:
with h5py.File(fin.name, "w") as f:
shape = x_data.shape

if x_data.ndim == 2:
# ilastik appears to add a channel dimension
# even if the image is grayscale
shape += (1,)

f.create_dataset("data", shape, data=x_data)

fin.close()

fout.close()

subprocess.check_call(cmd)

with h5py.File(fout.name, "r") as f:
y_data = f["exported_data"].value

y = cellprofiler.image.Image(y_data)

workspace.image_set.add(self.y_name.value, y)

if self.show_window:
workspace.display_data.x_data = x_data

workspace.display_data.y_data = y_data

workspace.display_data.dimensions = image.dimensions
except subprocess.CalledProcessError as cpe:
logger.error("Command {} exited with status {}".format(cpe.output, cpe.returncode), cpe)

raise cpe
except IOError as ioe:
raise ioe
finally:
os.unlink(fin.name)

os.unlink(fout.name)
3 changes: 3 additions & 0 deletions requirements-windows.txt
@@ -0,0 +1,3 @@
cellh5
keras
cntk
7 changes: 4 additions & 3 deletions tests/conftest.py
@@ -37,7 +37,7 @@ def image(request):
return cellprofiler.image.Image(image=data, dimensions=dimensions)


@pytest.fixture(scope="module")
@pytest.fixture(scope="function")
def image_empty():
image = cellprofiler.image.Image()

@@ -71,11 +71,12 @@ def measurements():
return cellprofiler.measurement.Measurements()


@pytest.fixture(scope="module")
@pytest.fixture(scope="function")
def module(request):
instance = getattr(request.module, "instance")

return instance
return instance()


@pytest.fixture(scope="function")
def objects(image):
2 changes: 1 addition & 1 deletion tests/test_blobdetection.py
@@ -35,7 +35,7 @@ def image(request):
return cellprofiler.image.Image(image=data, dimensions=dimensions)


instance = blobdetection.BlobDetection()
instance = blobdetection.BlobDetection


def test_run_dog(image, image_set, module, workspace):
2 changes: 1 addition & 1 deletion tests/test_edgedetection.py
@@ -7,7 +7,7 @@

import edgedetection

instance = edgedetection.EdgeDetection()
instance = edgedetection.EdgeDetection


def test_run_without_mask(image, image_set, module, workspace):
2 changes: 1 addition & 1 deletion tests/test_gammacorrection.py
@@ -3,7 +3,7 @@

import gammacorrection

instance = gammacorrection.GammaCorrection()
instance = gammacorrection.GammaCorrection


def test_run(image, module, image_set, workspace):