__author__ = 'David Tadres'
__project__ = 'PiVR'
import json
import operator
import os
import tkinter as tk
import traceback
from tkinter import messagebox
import imageio
import numpy as np
from PIL import Image, ImageTk
from scipy import ndimage
from skimage import transform
# only needed in debug mode
from skimage.draw import line
from skimage.io import imread
from skimage.measure import regionprops, label
from pathlib import Path
import tracking_help_classes as thc
# this try-except statement checks if the processor is an ARM
# processor (as used by the Raspberry Pi) or not.
# Since os.uname() only exists on Unix-like systems, the call is
# wrapped in try-except; otherwise it would throw an
# AttributeError on a Windows system.
try:
if os.uname()[4][:3] == 'arm':
        # This will yield True both for a Raspberry Pi and for
        # other ARM devices such as Apple M1 machines.
# Use this code snippet
# (from https://raspberrypi.stackexchange.com/questions/5100/detect-that-a-python-program-is-running-on-the-pi)
import re
CPUINFO_PATH = Path("/proc/cpuinfo")
        if CPUINFO_PATH.exists():
            with open(CPUINFO_PATH) as f:
                cpuinfo = f.read()
            if re.search(r"^Model\s*:\s*Raspberry Pi", cpuinfo, flags=re.M) is not None:
                # a match means this is running on a Raspberry Pi
                RASPBERRY = True
                LINUX = True
            else:
                # an ARM CPU that is not a Raspberry Pi, e.g. an
                # Apple M1 chip (or another ARM device)
                RASPBERRY = False
                LINUX = True
        else:
            # /proc/cpuinfo is missing: assume a generic ARM Linux
            # device rather than a Raspberry Pi
            RASPBERRY = False
            LINUX = True
else:
# is either Mac or Linux
RASPBERRY = False
LINUX = True
DIRECTORY_INDICATOR = '/'
except AttributeError:
# is Windows
RASPBERRY = False
LINUX = False
DIRECTORY_INDICATOR = '\\'
if RASPBERRY:
from picamera.array import PiYUVArray
class FindAnimal:
"""
.. Intended Audience: User who wants to understand the code.
The user should already have some experience and at least read
the :ref:`CodeExplanation<CodeExplanationLabel>`
.. _AnimalDetectionClass:
Before the algorithm can start tracking it first needs to
**identify the animal** and create a **background image** that
can be used for the rest of the experiment. Three "Animal Detection
Modes" are available. See
    :ref:`here<AnimalDetectionExplanationLabel>` for a high-level
    description, which one should consult to understand the
    advantages and limitations of each Mode.
**Mode 1:**
If the background is not evenly illuminated or if the animal
moves fast and often goes to the edge
:ref:`Mode 1 <CodeExplanationMode1Label>` is a
safe and easy choice.
#) Identify the region of the picture where the animal is
located by detecting movement. For this
:func:`find_roi_mode_one_and_three` is called.
#) Reconstruct the background image from the mean image
while the animal was identified. For this
:func:`define_animal_mode_one` is called.
**Mode 2:**
:ref:`Mode 2 <CodeExplanationMode2Label>` can be used if the
animal can be added to the arena without changing anything in the
field of view of the camera while doing so.
#) Takes a picture before the animal is placed and a
second picture after the animal is placed. This approach was
used before in the SOS tracker (`Gomez-Marin et al.,
2012 <https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041642>`_).
This is called animal detection Mode 2. This
       only works if the only object that differs between the two
       images is the animal that one wants to track. Slight
       changes between trials, such as placing a lid on the
       arena, can result in detectable changes in the image which
       often break this approach! For this
       :func:`find_roi_mode_two` is called.
#) Computationally the identification of the animal in two
images, one with and one without, is very simple. Just
subtract the two images, and what is standing out must be
the object the user wants to track. For this
:func:`define_animal_mode_two` is called
**Mode 3:**
This method is a bit more complicated compared to Mode 1 and Mode 2.
It attempts to combine the ease of use of Mode 1 with the
perfectly "clean" background image required by Mode 2.
This method only works well if several conditions are
:ref:`met <CodeExplanationMode3Label>`. We only used this
method with the slow fruit fly larva. See
    :ref:`here<CodeExplanationMode3Label>` for a detailed
    high-level description.
#) Identify the region of the picture where the animal is
located by detecting movement. This is in fact identical
to Mode 1, as the same function is called:
:func:`find_roi_mode_one_and_three`
#) Then the animal must be defined using a binary image. This
is a critical step and necessitates that the animal
clearly stands out compared to the background. The
function is: :func:`define_animal_mode_three`.
#) To reconstruct the background (for the experiment) the
animal must leave the original position. The relevant
function: :func:`animal_left_mode_three`
    #) Then the background is reconstructed by taking the
       background image minus the area that contained the animal
       at the start, and adding the same region without the animal
       (once the animal has moved away from its initial position),
       resulting in a clean background image for trajectory
       display. The function doing that is
       :func:`background_reconstruction_mode_three`
#) For the tracking algorithm to start it needs to know
where the animal went while all of the above was going on.
:func:`animal_after_box_mode_three`
Finally, this class also holds an offline animal detection
function: :func:`find_roi_post_hoc`. This is used when running
**Todo-Link** Post-Hoc Single animal tracking. For example for
debugging or to define a new model organism the user wants to
track in the future.
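
    A hedged usage sketch (the values below are illustrative
    assumptions, not prescriptive defaults; ``images``,
    ``organism_name`` and ``heuristics`` would come from the
    user's recording and settings):

    .. code-block:: python

        animal = FindAnimal(
            boxsize=50,
            signal='dark',
            debug_mode=False,
            cam=images,  # numpy stack or sorted list of filenames
            pixel_per_mm=3.5,
            model_organism=organism_name,
            organisms_and_heuristics=heuristics,
            animal_detection_mode='Mode 1',
            offline_analysis=True)
        if animal.animal_detected:
            roi = animal.first_roi  # region containing the animal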
"""
def __init__(self,
boxsize,
signal,
debug_mode,
stringency_size = 0.01,
stringency_centroid = 0.01,
cam = None,
resolution = [640,480],
recording_framerate = 2,
display_framerate = 2,
model_organism = None,
offline_analysis=False,
pixel_per_mm=None,
organisms_and_heuristics=None,
post_hoc_tracking=False,
animal_detection_mode = 'Mode 1',
simulated_online_analysis=False,
datetime='not defined'
):
self.recording_framerate = recording_framerate
self.boxsize = boxsize
self.signal = signal
self.debug_mode = debug_mode
if debug_mode:
            self.resolution = resolution  # todo why?
            self.resize = 1  # preview_resize
        if not RASPBERRY:
self.display_framerate = display_framerate
self.interval = (1 / display_framerate) * 1000
        self.stringency_size = stringency_size
self.stringency_centroid = stringency_centroid
if self.signal == 'white':
self.compare = operator.gt
self.box_intensity = 255
elif self.signal == 'dark':
self.compare = operator.lt
self.box_intensity = 0
        else:
            print('signal has to be either "white" or "dark". '
                  'Please adjust code.')
import sys
sys.exit()
# this is the camera object if we are on a Raspberry Pi or
# the path to the file we want to analyze
self.camera = cam
# if we are not on the Raspberry, call the images what they
# are to avoid confusion!
        if not RASPBERRY:
self.images = self.camera
self.counter = None
self.pixel_per_mm = pixel_per_mm
self.model_organism = model_organism
self.organisms_and_heuristics = organisms_and_heuristics
self.filled_area_min = \
pixel_per_mm \
* organisms_and_heuristics[
model_organism]['filled_area_min_mm']
self.filled_area_max \
= pixel_per_mm \
* organisms_and_heuristics[
model_organism]['filled_area_max_mm']
self.eccentricity_min = \
organisms_and_heuristics[model_organism]['eccentricity_min']
self.eccentricity_max = \
organisms_and_heuristics[model_organism]['eccentricity_max']
self.major_over_minor_axis_min = \
organisms_and_heuristics[
model_organism]['major_over_minor_axis_min']
self.major_over_minor_axis_max = \
organisms_and_heuristics[
model_organism]['major_over_minor_axis_max']
if self.debug_mode:
            print('filled_area_min set to: ' + repr(self.filled_area_min))
            print('filled_area_max set to: ' + repr(self.filled_area_max))
            print('eccentricity_min set to: ' + repr(self.eccentricity_min))
            print('eccentricity_max set to: ' + repr(self.eccentricity_max))
            print('major_over_minor_axis_min set to: '
                  + repr(self.major_over_minor_axis_min))
            print('major_over_minor_axis_max set to: '
                  + repr(self.major_over_minor_axis_max))
self.offline_analysis = offline_analysis
        self.images_as_npy = True
if offline_analysis or simulated_online_analysis:
if type(cam) is np.ndarray:
self.images_as_npy = True
# RESOLUTION IS IN FORMAT: 640x480 (Column, then row!)
self.resolution = [self.images.shape[1],
self.images.shape[0]]
#print(resolution)
elif type(cam) is list:
self.images_as_npy = False
temp = imread(self.images[0])
# RESOLUTION IS IN FORMAT: 640x480 (Column, then row!)
self.resolution = [temp.shape[1], temp.shape[0]]
#print(resolution)
self.post_hoc_tracking = post_hoc_tracking
self.animal_detection_mode = animal_detection_mode
self.simulated_online_analysis = simulated_online_analysis
self.datetime = datetime
# initialize a variable for the unfiltered background
self.unfiltered_background = None
# To make the whole animal detection method more user
# friendly, open a popup window
if not post_hoc_tracking: # Todo: and not simulated_online_analysis
self.child_preexperiment = tk.Toplevel()
self.child_preexperiment.grab_set()
self.child_preexperiment.wm_title('Cancel Animal detection')
self.child_preexperiment.geometry('300x70+550+0')
self.cancel_label = tk.Label(
self.child_preexperiment,
text='Is animal detection taking too long?',
font="Helvetica 10 bold")
self.cancel_label.grid(row=0, column=0)
self.cancel_button = tk.Button(
self.child_preexperiment,
text='Cancel Animal Detection',
font="Helvetica 10 bold",
command=self.cancel_animal_detection_func)
self.cancel_button.grid(row=1, column=0)
self.cancel_button.update()
else:
self.child_preexperiment = None
self.child = None
        # Best would be to use a sigma which is ~half the minimal
        # cross section of the animal. The problem is that this
        # parameter isn't readily available. Instead, use
        # filled_area_min_mm (for larvae = 8) divided by
        # major_over_minor_axis_min (for larvae = 1.25). In the
        # worst case the animal will be 1.25/8 = 0.15mm wide; if
        # Pixel/mm is 3.5 this means the larva would minimally
        # cover 0.15 * 3.5 = 0.525 pixels. As pixels are integers
        # it's possible to have images where the animal is only
        # one pixel 'thick'. Therefore a sigma of 0.5 should be
        # used:
minimal_cross_section_animal = \
(self.major_over_minor_axis_min/self.filled_area_min)\
*self.pixel_per_mm
if minimal_cross_section_animal < 2:
# because minimum detectable thickness is one pixel
minimal_cross_section_animal = 2
self.sigma_for_image_filtering = minimal_cross_section_animal/2
if self.sigma_for_image_filtering > 2.5:
self.sigma_for_image_filtering = 2.5 # empirical
print('Sigma for filtering: ' + repr(self.sigma_for_image_filtering))
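        # Worked example with the larva numbers from the comment
        # above and an assumed pixel_per_mm of 3.5:
        # filled_area_min = 3.5 * 8 = 28 pixels, so
        # minimal_cross_section_animal = (1.25 / 28) * 3.5 ~ 0.16,
        # which is raised to the minimum of 2, giving a filtering
        # sigma of 1.0.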
# A bool switch that will be turned to true if the animal was
        # detected (meaning no error occurred)
self.animal_detected = False
# Another bool switch that indicates
# whether the user has hit the 'cancel animal detection' button
self.cancel_animal_detection_bool = False
# after some brainstorming: The best way to let user decide
# on animal detection/background reconstruction:
# 1) mode 1 - standard. This is the difficult background
# 2) mode 2 - pre-define background. This is SOS mode
        # 3) mode 3 - background reconstruction by stitching. This
# is currently the standard
# the point of changing the standard is that mode 1 should
# work in almost all cases. Users who don't care should just
# use that mode as it will allow robust VR
# experiments/tracking. For the others we have mode 2 and 3 to
# choose from.
self.STD = None # Needed for animal detection Mode 3,
# define_animal
try:
if post_hoc_tracking:
print('post hoc tracking identification of animal '
'started')
self.find_roi_post_hoc()
if not self.cancel_animal_detection_bool:
self.define_animal_post_hoc()
elif animal_detection_mode == 'Mode 1':
print('Animal Detection Mode 1 started')
self.find_roi_mode_one_and_three()
if not self.cancel_animal_detection_bool:
self.define_animal_mode_one()
elif animal_detection_mode == 'Mode 2':
print('Animal Detection Mode 2 started')
self.find_roi_mode_two()
if not self.cancel_animal_detection_bool:
self.define_animal_mode_two()
elif animal_detection_mode == 'Mode 3':
print('Animal Detection Mode 3 started')
self.find_roi_mode_one_and_three()
if not self.cancel_animal_detection_bool:
self.define_animal_mode_three()
if not self.cancel_animal_detection_bool:
self.animal_left_mode_three()
self.background_reconstruction_mode_three()
self.animal_after_box_mode_three()
except Exception as caught_error:
self.error_message_pre_exp_func(error_stack=caught_error)
self.animal_detected = False
        if RASPBERRY and not self.post_hoc_tracking:
self.child_preexperiment.grab_release()
self.child_preexperiment.destroy()
    def find_roi_mode_one_and_three(self):
"""
Identification of the original region of interest (ROI):
This function identifies a region in the image that contains
pixels that change over time. The assumption is that the only
object moving in the field of view should be the animal the
user is interested in.
To achieve this, the camera provides images. This function
will take the mean of the images taken so far. It will then,
starting from the second frame, start to subtract the newest
frame from the previously taken images. In the resulting
image, anything that moves will clearly stand out compared
to the background. A region of interest is then drawn around
those pixels to be used later on.
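
        A minimal sketch of the core operation, assuming a
        hypothetical grayscale image stack ``imgs`` of shape
        (rows, cols, frames) and a current frame index ``t``:

        .. code-block:: python

            import numpy as np

            mean_img = np.nanmean(imgs[:, :, :t], axis=2)
            diff = imgs[:, :, t] - mean_img
            # same static threshold the dark-signal branch uses:
            # mean of the difference image plus factor 2 * sigma 2
            moving = diff > np.nanmean(diff) + 2 * 2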
"""
if self.debug_mode:
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Looking for a movement')
self.child.attributes("-topmost", True) # force on top
child_frame = tk.Frame(self.child,
width=self.resolution[0],
height=self.resolution[1])
child_frame.pack()
child_canvas = tk.Canvas(self.child,
width=self.resolution[0],
height=self.resolution[1])
child_canvas.place(x=0, y=0)
self.identification_images = None
self.roi_found = False
self.counter = 0
"""
This while loop runs as long until it finds something that
could be the animal - if debug mode is on, the user will be
asked if that's ok - if not it's called again to keep looking.
The more images have to be taken the lower the background
will be as the images mean is taken and the more
clear the moving animal should be visible.
"""
        while not self.roi_found \
                and not self.cancel_animal_detection_bool:
self.cancel_button.update()
print('Frame: ' + repr(self.counter))
blobs = []
if self.counter == 0:
                if RASPBERRY and not self.offline_analysis:
# capturing a frame, The "with" makes sure the
# camera stream is closed again immediately after
# usage.
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
# the camera always takes an YUV picture - for
# the tracking the software only needs to get
# one gray channel.
# immediately filter the image using a
# gaussian filter to get rid of camera noise
self.first_image = \
ndimage.filters.gaussian_filter(
output.array[:,:,0],
sigma=self.sigma_for_image_filtering)
self.unfiltered_background = \
output.array[:, :, 0].copy()
# if not on the Raspberry or if offline analysis,
# take the pre-recorded images
else:
                    # if the user provided a numpy array use it
if self.images_as_npy:
# immediately filter the image using a
# gaussian filter to get rid of camera noise
self.first_image = \
ndimage.filters.gaussian_filter(
self.images[:,:,self.counter],
sigma=self.sigma_for_image_filtering)
self.unfiltered_background = \
self.images[:,:,self.counter].copy()
# else use the sorted list of names that was
# defined with the user input
else:
temp = imread(self.images[self.counter])
# immediately filter the image using a
# gaussian filter to get rid of camera noise
self.first_image = \
ndimage.filters.gaussian_filter(
temp[:,:,0],
sigma=self.sigma_for_image_filtering)
self.unfiltered_background = temp[:, :, 0].copy()
self.goodbackground = self.first_image.copy()
                # 20 frames must be enough to find something
                # moving - this of course depends on the framerate!
                # On the Raspberry Pi this usually takes quite a
                # while: no matter what fps the user asks for, the
                # effective rate will almost certainly be lower
                # because the operation runs on the whole image
                # (usually ~2fps). We assume that most animals
                # move within 10-20 seconds.
                if RASPBERRY:
preallocated_images = 20
self.identification_images = np.zeros((
self.first_image.shape[0],
self.first_image.shape[1],
preallocated_images),
dtype=np.uint8)
else:
                    # If, on the other hand, someone takes a video
                    # at a high framerate, e.g. at 80fps, it's
                    # quite possible that the animal does not move
                    # in the first half second. It's impossible to
                    # determine exactly how many frames should be
                    # preallocated, but 500 frames of 1024x768
                    # uint8 is ~400Mb, which should still fit into
                    # the RAM of most PCs.
preallocated_images = 500
self.identification_images = np.zeros((
self.first_image.shape[0],
self.first_image.shape[1],
preallocated_images),
dtype=np.uint8)
self.identification_images[:,:,0] = self.first_image
            elif self.counter > 0:
                # this really shouldn't happen on the Raspberry Pi.
# It might happen when the user records a high-speed
# video of an animal that doesn't move a lot (or very
# sporadically). This is NOT a very efficient way to
# preallocate the array as the old array together
# with a newly allocated array needs to be copied
# to a new location in memory. This will make
# consecutive operations faster, however.
if self.counter % preallocated_images == 0 and \
self.counter != 0:
self.identification_images = \
np.dstack((self.identification_images,
np.zeros((self.first_image.shape[0],
self.first_image.shape[1],
preallocated_images))
)).copy()
                if RASPBERRY:
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
# immediately use a gaussian filter on that
# image and save it
self.identification_images[:,:,self.counter] = \
ndimage.filters.gaussian_filter(
output.array[:,:,0],
sigma=self.sigma_for_image_filtering)
else:
# if the user provided a numpy array use it
if self.images_as_npy:
# immediately use a gaussian filter on that
# image and save it
self.identification_images[:,:,self.counter] = \
ndimage.filters.gaussian_filter(
self.images[:,:,self.counter],
sigma=self.sigma_for_image_filtering)
# else use the sorted list of names that was
# defined with the user input
else:
temp = imread(self.images[self.counter])
# immediately use a gaussian filter on that
# image and save it
self.identification_images[:,:,self.counter] = \
ndimage.filters.gaussian_filter(
temp[:,:,0],
sigma=self.sigma_for_image_filtering)
# take the mean of all the images taken so far
image_mean = np.nanmean(
self.identification_images[:, :, 0:self.counter],
axis=2)
# idea to speed up if many images are being taken
# subtract the current image from the mean image
subtracted_image = \
self.identification_images[:, :, self.counter] \
- image_mean
            # now we should have a trimodal distribution of pixel
            # intensity. The threshold is the mean of the smoothed
            # image plus/minus a given factor (2) * a given sigma
            # (2). It seems better to keep this static: using the
            # STD of the subtracted image was tried, but for the
            # first couple of images it is very noisy and detects
            # a lot of things that are not the animal!
if self.signal == 'white':
locate_thresh = np.nanmean(subtracted_image) - 2 * 2
thresh_image = subtracted_image < locate_thresh
else:
locate_thresh = np.nanmean(subtracted_image) + 2 * 2
thresh_image = subtracted_image > locate_thresh
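            # Since the mean of subtracted_image is close to zero,
            # the static threshold sits roughly 4 grey values away
            # from it: the hard-coded 2 * 2 is the factor 2 times
            # the assumed noise sigma of 2 mentioned above.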
# regionprops will be used throughout the tracking
# software:
# http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops
props = regionprops(label(thresh_image))
# in case we have more than one regionproperty
if len(props) > 1:
# go through all of them
for i_props in range(len(props)):
if self.debug_mode:
try:
print('in find ROI prop#'
+ repr(i_props)
+ ' : Filled Area: '
+ repr(props[i_props].filled_area)
+ ' eccentricity: '
+ repr(props[
i_props].eccentricity)[0:5]
+ ' major over minor: '
+ repr(props[
i_props].major_axis_length
/ props[i_props].minor_axis_length)[0:5])
except ZeroDivisionError:
pass
                    # 3 rules at the moment:
                    # 1) the area must be in a certain range.
                    # 2) the eccentricity (='roundness') must be
                    # in a certain range (e.g. it shouldn't be a
                    # circle) and
                    # 3) the major axis / minor axis must be in a
                    # certain range. Maybe the eccentricity can be
                    # kicked out? Major/minor is more intuitive.
if self.filled_area_min \
< props[i_props].filled_area \
< self.filled_area_max \
and self.eccentricity_min\
< props[i_props].eccentricity \
< self.eccentricity_max \
and self.major_over_minor_axis_min \
< props[i_props].major_axis_length\
/props[i_props].minor_axis_length \
< self.major_over_minor_axis_max:
blobs.append(i_props)
if self.debug_mode:
print('Interesting blob in prop#'
+ repr(i_props)
+ ' : Filled Area: '
+ repr(props[
i_props].filled_area)
+ ' eccentricity: '
+ repr(props[
i_props].eccentricity)[0:5]
+ ' major over minor: '
+ repr(props[
i_props].major_axis_length
/props[i_props].minor_axis_length)[0:5])
# if there's only one regionprop, perfect,
# found the moving object
if len(blobs) == 1:
# Call the FindROI function with boxsize
# + the skeleton length. Necessary for slow
# animals, such as larvae
self.first_roi = thc.FindROI(
props[blobs[0]],
self.boxsize +
self.organisms_and_heuristics[
self.model_organism]['max_skeleton_length_mm']
*self.pixel_per_mm,
2,
self.first_image)
self.roi_found = True
print('only one blob at x:'
+ repr(props[blobs[0]].centroid[1])[0:4]
+ ' y: '
+ repr(props[blobs[0]].centroid[0])[0:4])
# Temporarily save these parameters for
# display in messagebox if debug mode is on
filled_area = props[blobs[0]].filled_area
eccentricity = props[blobs[0]].eccentricity
major_over_minor = \
props[blobs[0]].major_axis_length \
/ props[blobs[0]].minor_axis_length
# otherwise the animal hasn't moved enough yet,
# or too many things moved, so try again with
# another picture
else:
pass
# if there's only one regionprop, and if it's bigger
# than a defined minimum, perfect, found the animal
elif len(props) == 1:
if props[0].area > self.filled_area_min:
# If there's only one blob and it makes the
# cut for size, try to take the object and
# say this is the animal we're looking for
self.first_roi = thc.FindROI(
props[0],
self.boxsize
+ self.organisms_and_heuristics[
self.model_organism]['max_skeleton_length_mm']
*self.pixel_per_mm,
2,
self.first_image)
self.roi_found = True
# Temporarily save these parameters for
# display in messagebox if debug mode is on
filled_area = props[0].filled_area
eccentricity = props[0].eccentricity
major_over_minor = props[0].major_axis_length \
/ props[0].minor_axis_length
print('only one prop')
# otherwise try again next picture
elif len(props) == 0:
pass
if self.debug_mode and self.counter > 0:
                # take the boolean image and change it to uint8 as
                # this is the format ImageTk.PhotoImage accepts
if self.counter == 1:
converted_binary = np.zeros(
(
thresh_image.shape[0],thresh_image.shape[1], 3
),
dtype=np.uint8)
converted_binary[:, :, :] = \
thresh_image[:,:,np.newaxis].astype(np.uint8)
#converted_binary = thresh_image.astype(np.uint8)
# convert all the ones (from true) into 255
converted_binary[np.where(converted_binary == 1)] = 255
# would be best to draw bounding boxes around all the
# regionprops that have been identified as possible
# animals. This will help with fine-tuning the animal
# parameters
for i_blobs_bboxes in range(len(blobs)):
# the left vertical line
rr, cc = line(int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[1]),
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[1]))
try:
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
# the top horizontal line
rr, cc = line(int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[1]),
int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[3]))
try:
#converted_binary[rr,cc] = 255
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
# the bottom horizontal line
rr, cc = line(int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[1]),
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[3]))
try:
#converted_binary[rr,cc] = 255
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
                # the right vertical line
rr, cc = line(int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[3]),
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[3]))
try:
#converted_binary[rr,cc] = 255
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
photo = ImageTk.PhotoImage(
image=Image.fromarray(converted_binary))
child_canvas.create_image(
0, 0, image=photo, anchor=tk.NW)
self.child.update()
self.counter += 1
if self.roi_found:
if self.debug_mode:
stop_searching_roi = \
messagebox.askyesno(
'PiVR Information',
'The algorithm found something that is moving!\n'
'If that does not look at all like your animal\n'
'press "No", to continue press "Yes"\n'
'Filled Area: ' + repr(filled_area)[0:5] + '\n'
'Eccentricity: ' + repr(eccentricity)[0:5] + '\n'
'Major over minor axis: '
+ repr(major_over_minor)[0:5],
parent=self.child)
#print(stop_searching_roi)
if not stop_searching_roi:
self.roi_found = False
if self.debug_mode:
self.child.grab_release()
self.child.destroy()
print('ROI defined!')
    def find_roi_mode_two(self):
"""
Sometimes the user can not use the automatic animal
detection Method 3 because the background is not completely
homogeneous. If the user still needs to have a clear
background image without any trace of the animal this
Methods can be used. It is similar to the one used in
Gomez-Marin et al., 2011.
#. Take an image before placing the animal
#. Place the animal
#. Take another picture and subtract from the first
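
        A minimal sketch of the subtraction step, assuming
        hypothetical grayscale frames ``before`` and ``after`` and
        a dark animal:

        .. code-block:: python

            import numpy as np

            diff = before.astype(np.int16) - after.astype(np.int16)
            # (the real code first smooths diff with a gaussian)
            # a dark animal leaves a positive residue in diff
            binary = diff > np.nanmean(diff) + 2 * np.std(diff)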
"""
if not self.simulated_online_analysis:
tk.messagebox.showinfo('PiVR information',
'Hit "OK" to take an image without '
'the animal.')
# capturing a frame, The "with" makes sure the camera
# stream is closed again immediately after usage.
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
# the camera always takes an YUV picture - for the
# tracking the software only needs to get one gray
# channel.
self.background_image = output.array[:, :, 0]
# Todo: This is different compared to mode 1 and three.
# Make sure sigma 1 is enough here
self.smoothed_goodbackground = \
ndimage.filters.gaussian_filter(
self.background_image, sigma=1)
imageio.imsave('Background.tiff', self.background_image)
#imageio.imsave('Background.jpg', self.background_image)
tk.messagebox.showinfo('PiVR information',
'Now place the animal and then '
'hit "OK"')
else:
            # if the user provided a numpy array use it
if self.images_as_npy:
self.background_image = np.nanmean(self.images,axis=2)
# else use the sorted list of names that was defined with
# the user input
else:
# TODO - this is a hack! It currently just takes the
# last image and sets it as the background!
# FIX IT - not urgent as this is only happening if
# the user wants to simulate Mode 2, not a problem
# for actual experiments
temp = imread(self.images[-1])
#print(temp.shape)
self.background_image = temp[:, :, 0]
self.smoothed_goodbackground = \
ndimage.filters.gaussian_filter(
self.background_image, sigma=1)
imageio.imsave('Background.tiff', self.background_image)
#imageio.imsave('Background.jpg', self.background_image)
if self.debug_mode:
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Looking for a movement - Mode 2')
self.child.attributes("-topmost", True) # force on top
            child_frame = tk.Frame(
                self.child,
                width=self.resolution[0],
                height=self.resolution[1])
            child_frame.pack()
            child_canvas = tk.Canvas(
                self.child,
                width=self.resolution[0],
                height=self.resolution[1])
            child_canvas.place(x=0, y=0)
self.roi_found = False
self.counter = 0
        while not self.roi_found \
                and not self.cancel_animal_detection_bool:
self.cancel_button.update()
print('Frame: ' + repr(self.counter))
blobs = []
            if RASPBERRY and not self.offline_analysis:
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
                    # the camera always takes a YUV picture - for
                    # the tracking the software only needs one
                    # gray channel.
self.first_image = output.array[:,:,0]
# if not on the Raspberry or if offline analysis,
# take the pre-recorded images
else:
                # if the user provided a numpy array use it
if self.images_as_npy:
self.first_image = self.images[:, :, self.counter]
# else use the sorted list of names that was defined
# with the user input
else:
temp = imread(self.images[self.counter])
#print(temp.shape)
self.first_image = temp[:, :, 0]
            # subtract the current image from the background image
first_subtracted_image = self.background_image.astype(np.int16) \
- self.first_image.astype(np.int16)
# calculate the standard deviation of the subtracted
# image over all pixels.
std = np.std(first_subtracted_image)
# due to camera noise we unfortunately have to "smear"
# the image a bit, we do that with this
# gaussian filter
smoothed_image = ndimage.filters.gaussian_filter(
first_subtracted_image, sigma=std)
            # now we should have a gaussian distribution of pixel
            # intensity. The threshold is the mean of the smoothed
            # image minus/plus a given factor * the standard
            # deviation of the unsmoothed subtracted image.
if self.signal == 'white':
self.overall_threshold = np.nanmean(smoothed_image) \
- 2 * std
thresh_image = smoothed_image < self.overall_threshold
else:
self.overall_threshold = np.nanmean(smoothed_image) \
+ 2 * std
thresh_image = smoothed_image > self.overall_threshold
# regionprops will be used throughout the tracking software:
# http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops
props = regionprops(label(thresh_image))
# in case we have more than one regionproperty
if len(props) > 1:
# go through all of them
for i_props in range(len(props)):
if self.debug_mode:
try:
print('in find ROI prop#' + repr(i_props)
+ ' : Filled Area: '
+ repr(props[i_props].filled_area)
+ ' eccentricity: '
+ repr(props[
i_props].eccentricity)[0:5]
+ ' major over minor: '
+ repr(props[
i_props].major_axis_length
/ props[i_props].minor_axis_length)[0:5])
except ZeroDivisionError:
pass
                    # 3 rules at the moment:
                    # 1) the area must be in a certain range.
                    # 2) the eccentricity (='roundness') must be in
                    # a certain range (e.g. it shouldn't be a
                    # circle) and
                    # 3) the major axis / minor axis must be in a
                    # certain range.
                    # Maybe the eccentricity can be kicked out?
                    # Major/minor is more intuitive.
if self.filled_area_min \
< props[i_props].filled_area \
< self.filled_area_max \
and \
self.eccentricity_min \
< props[i_props].eccentricity \
< self.eccentricity_max \
and \
self.major_over_minor_axis_min \
< props[i_props].major_axis_length \
/props[i_props].minor_axis_length \
< self.major_over_minor_axis_max:
blobs.append(i_props)
if self.debug_mode:
print('Interesting blob in prop#'
+ repr(i_props)
+ ' : Filled Area: '
+ repr(props[i_props].filled_area)
+ ' eccentricity: '
+ repr(props[
i_props].eccentricity)[0:5]
+ ' major over minor: '
+ repr(props[
i_props].major_axis_length
/ props[i_props].minor_axis_length)[0:5])
# if there's only one regionprop, perfect, found the
# moving object
if len(blobs) == 1:
self.first_roi = thc.FindROI(
props[blobs[0]],
self.boxsize, 2,
self.first_image)
self.roi_found = True
print('only one blob at x:'
+ repr(props[blobs[0]].centroid[1])
+ ' y: '
+ repr(props[blobs[0]].centroid[0]))
# Temporarily save these parameters for display
# in messagebox if debug mode is on
filled_area = props[blobs[0]].filled_area
eccentricity = props[blobs[0]].eccentricity
major_over_minor = \
props[blobs[0]].major_axis_length \
/ props[blobs[0]].minor_axis_length
# otherwise the animal hasn't moved enough yet,
# or too many things moved, so try again with another
# picture
else:
pass
# if there's only one regionprop, and if it's bigger than
# a defined minimum, perfect, found the animal
elif len(props) == 1:
if props[0].area > self.filled_area_min:
# If there's only one blob and it makes the cut
# for size, try to take the object and say this
# is the animal we're looking for
# todo would be good to use the true-animal form
# to be sure to actually detect the animal
# not just the largest blob
self.first_roi = thc.FindROI(
props[0], self.boxsize, 2, self.first_image)
self.roi_found = True
# Temporarily save these parameters for
# display in messagebox if debug mode is on
filled_area = props[0].filled_area
eccentricity = props[0].eccentricity
major_over_minor = props[0].major_axis_length \
/ props[0].minor_axis_length
print('only one prop')
# otherwise try again next picture
elif len(props) == 0:
pass
if self.debug_mode and self.counter > 0:
converted_binary = thresh_image.astype(np.uint8)
converted_binary[np.where(converted_binary==1)] = 255
photo = ImageTk.PhotoImage(
image=Image.fromarray(converted_binary))
child_canvas.create_image(
0,0, image=photo, anchor=tk.NW)
self.child.update()
self.counter += 1
if self.roi_found:
if self.debug_mode:
stop_searching_roi = messagebox.askyesno(
'PiVR information',
'The algorithm found something that is moving!\n'
'If that does not look at all like your animal\n'
'press "No", to continue press "Yes"\n'
'Filled Area: ' + repr(filled_area)[0:5] + '\n'
'Eccentricity: ' + repr(eccentricity)[0:5] + '\n'
'Major over minor axis: ' + repr(major_over_minor)[0:5])
#print(stop_searching_roi)
if not stop_searching_roi:
self.roi_found = False
self.child.grab_release()
self.child.destroy()
    def find_roi_post_hoc(self):
"""
Identifies animal in post-hoc analysis.
        Normally used when the user defines a new animal. The
        workflow consists of the user taking a
:ref:`video <VideoLabel>` and then running the **TODO**-Link
*Post-Hoc Single Animal Analysis*. This function
identifies the animal before the actual tracking starts.
It first reads all the images (user should provide which file
format the images are in) and zips them up so that the folder
gets easier to copy around. It also creates a numpy array with
all the images for this script to use.
It then takes the mean of all the
images to create the background image.
It then smoothens the background image using a gaussian
filter with sigma 1.
It then starts to loop over as many images as necessary by:
        #. subtracting the current image from the mean
           (background) image.
#. Calculate the threshold by defining everything below
or above (depending on **TODO** link *signal*) 2*std from
the mean as signal.
#. Use the `regionprop function
<http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops>`_
to measure the properties of the labeled image regions.
        #. Depending on the number of properties, different rules
           apply:

           #. If there is more than one prop, cycle through them,
              testing whether they fulfill the minimal requirements
              to count as potential animals: *Filled Area* Min and
              Max, *Eccentricity* Min and Max, and *major over
              minor axis* Min and Max.

              #. If exactly one qualifies, that's the animal
              #. Else, go to the next image

           #. If there is only one prop, that's the animal; break
              out of the loop
           #. If there is no blob, go to the next image
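
        A sketch of the per-blob heuristic test described above
        (``looks_like_animal`` is a hypothetical helper - the real
        code inlines this test; ``p`` is one regionprop and the
        bounds are the instance attributes set in ``__init__``):

        .. code-block:: python

            def looks_like_animal(self, p):
                ratio = p.major_axis_length / p.minor_axis_length
                return (self.filled_area_min
                        < p.filled_area
                        < self.filled_area_max
                        and self.eccentricity_min
                        < p.eccentricity
                        < self.eccentricity_max
                        and self.major_over_minor_axis_min
                        < ratio
                        < self.major_over_minor_axis_max)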
"""
if self.debug_mode:
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Looking for a movement')
self.child.attributes("-topmost", True) # force on top
child_frame = tk.Frame(
self.child,
width=self.resolution[0],
height=self.resolution[1])
child_frame.pack()
child_canvas = tk.Canvas(
self.child,
width=self.resolution[0],
height=self.resolution[1])
child_canvas.place(x=0, y=0)
# try:
self.identification_images = None
self.roi_found = False
self.counter = 0
        # take the mean image (everything that moves during the
        # experiment disappears)
self.mean_image = np.nanmean(self.images, axis=2)
        # save it in the folder
imageio.imsave('Background.tiff', self.mean_image)
#imageio.imsave('Background.jpg', self.mean_image)
# then smoothen the image with the identical filter that will
# be used for every image afterwards
self.smoothed_goodbackground = \
ndimage.filters.gaussian_filter(
self.mean_image, sigma=1)
std_warning_shown = False
        # I noticed that when doing post-hoc analysis with high
        # framerates the animal detection algorithm doesn't
        # perform well.
        # This makes sense: since the algorithm is looking for a
        # moving animal, at high framerates the animal will move
        # very little between single frames and the algorithm will
        # often detect noise instead (especially since the video
        # encoder introduces additional noise).
        # One solution is to simply exclude the first second or
        # two from the calculation of the subtracted image.
        # Note: this is not a problem with real-time tracking as
        # the effective framerate during detection is much lower
        # than e.g. 15Hz!
min_time = 1 # seconds
self.counter = min_time * self.recording_framerate
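        # Worked example (hypothetical numbers): with min_time = 1
        # and a recording_framerate of 30 fps, the search starts
        # at frame 30, i.e. the first second of video is skipped.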
        while not self.roi_found \
                and not self.cancel_animal_detection_bool:
# self.cancel_button.update()
print('Frame: ' + repr(self.counter))
blobs = []
# keep a reference for the current image
self.first_image = self.images[:, :, self.counter]
# subtract the current image from the mean image
first_subtracted_image = \
self.mean_image.astype(np.int16) \
- self.images[:, :, self.counter].astype(np.int16)
# calculate the standard deviation of the subtracted
# image over all pixels.
std = np.std(first_subtracted_image)
            # if the std of the subtracted image is very high it
            # suggests that the background lighting conditions
            # changed during the experiment
if std > 10 and not std_warning_shown:
tk.messagebox.showinfo(
'Possible change of background during recording',
'The standard deviation of the mean image '
'subtracted'
'\nby the first image is ' + repr(std) + '.'
'\nThe expected value would be around 0'
'\nThis indicates that a large part of the image '
'seems to have'
'\nchanged intensity during the recording, '
'possibly due to a'
'\nchange of background lighting during the '
'experiment.\n'
)
std_warning_shown = True
# due to camera noise we unfortunately have to "smear"
# the image a bit, we do that with this gaussian filter -
# originally had std as the sigma but that breaks badly if
# the illumination is uneven!
smoothed_image = ndimage.filters.gaussian_filter(
first_subtracted_image, sigma=2)
            # now we should have a gaussian distribution of pixel
            # intensity. The threshold is the mean of the smoothed
            # image minus/plus a given factor * the standard
            # deviation of the unsmoothed subtracted image.
if self.signal == 'white':
self.overall_threshold = np.nanmean(
smoothed_image) - 2 * std
thresh_image = smoothed_image \
< self.overall_threshold
else:
self.overall_threshold = np.nanmean(
smoothed_image) + 2 * std
thresh_image = smoothed_image \
> self.overall_threshold
# regionprops will be used throughout the tracking software:
# http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops
props = regionprops(label(thresh_image))
# in case we have more than one regionproperty
if len(props) > 1:
# go through all of them
for i_props in range(len(props)):
if self.debug_mode:
try:
print('in find ROI prop#'
+ repr(i_props)
+ ' : Filled Area: '
+ repr(props[i_props].filled_area)
+ ' eccentricity: '
+ repr(props[
i_props].eccentricity)[0:5]
+ ' major over minor: '
+ repr(props[
i_props].major_axis_length
/ props[
i_props].minor_axis_length)[
0:5])
except ZeroDivisionError:
pass
                    # 3 rules at the moment:
                    # 1) the area must be in a certain range.
                    # 2) the eccentricity (='roundness') must be in
                    # a certain range (e.g. it shouldn't be a
                    # circle) and
                    # 3) the major axis / minor axis must be in a
                    # certain range.
                    # Maybe the eccentricity can be kicked out?
                    # Major/minor is more intuitive.
if self.filled_area_min \
< props[i_props].filled_area \
< self.filled_area_max \
and self.eccentricity_min \
< props[i_props].eccentricity \
< self.eccentricity_max \
and self.major_over_minor_axis_min \
< props[i_props].major_axis_length \
/ props[i_props].minor_axis_length \
< self.major_over_minor_axis_max:
blobs.append(i_props)
if self.debug_mode:
print('Interesting blob in prop#'
+ repr(i_props)
+ ' : Filled Area: '
+ repr(props[i_props].filled_area)
+ ' eccentricity: '
+ repr(props[
i_props].eccentricity)[0:5]
+ ' major over minor: '
+ repr(props[
i_props].major_axis_length
/ props[
i_props].minor_axis_length)[
0:5])
# if there's only one regionprop, perfect, found
# the moving object
if len(blobs) == 1:
self.first_roi = thc.FindROI(
props[blobs[0]],
self.boxsize,
2,
self.first_image)
self.roi_found = True
# Temporarily save these parameters for display
# in messagebox if debug mode is on
filled_area = props[blobs[0]].filled_area
eccentricity = props[blobs[0]].eccentricity
major_over_minor = props[blobs[0]].major_axis_length \
/ props[
blobs[0]].minor_axis_length
print('only one blob at x:'
+ repr(props[blobs[0]].centroid[1])
+ ' y: '
+ repr(props[blobs[0]].centroid[0]))
# otherwise the animal hasn't moved enough yet,
# or too many things moved, so try again with another
# picture
else:
pass
# if there's only one regionprop, and if it's bigger than
# a defined minimum, perfect, found the animal
elif len(props) == 1:
if props[0].area > self.filled_area_min:
# If there's only one blob and it makes the cut
# for minimal size, try to take the object and
# say this is the animal we're looking for
self.first_roi = thc.FindROI(
props[0],
self.boxsize,
2,
self.first_image)
# Temporarily save these parameters for display
# in messagebox if debug mode is on
filled_area = props[0].filled_area
eccentricity = props[0].eccentricity
major_over_minor = props[0].major_axis_length \
/ props[0].minor_axis_length
self.roi_found = True
print('only one prop')
# otherwise try again next picture
elif len(props) == 0:
pass
if self.debug_mode: # and self.counter > 0:
                # take the boolean image and change it to uint8 as
                # this is the format ImageTk.PhotoImage accepts
if self.counter == min_time * self.recording_framerate: #
converted_binary = np.zeros((
thresh_image.shape[0],
thresh_image.shape[1],
3), dtype=np.uint8)
converted_binary[:, :, :] = \
thresh_image[:, :, np.newaxis].astype(np.uint8)
# converted_binary = thresh_image.astype(np.uint8)
# convert all the ones (from true) into 255
converted_binary[np.where(converted_binary == 1)] = 255
# would be best to draw bounding boxes around all the
# regionprops that have been identified as possible
# animals. This will help with fine-tuning the animal
# parameters
for i_blobs_bboxes in range(len(blobs)):
# the left vertical line
rr, cc = line(
int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[1]),
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[1]))
try:
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
# the top horizontal line
rr, cc = line(
int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[1]),
int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[3]))
try:
# converted_binary[rr,cc] = 255
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
# the bottom horizontal line
rr, cc = line(
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[1]),
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[3]))
try:
# converted_binary[rr,cc] = 255
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
                # the right vertical line
rr, cc = line(
int(props[blobs[i_blobs_bboxes]].bbox[0]),
int(props[blobs[i_blobs_bboxes]].bbox[3]),
int(props[blobs[i_blobs_bboxes]].bbox[2]),
int(props[blobs[i_blobs_bboxes]].bbox[3]))
try:
# converted_binary[rr,cc] = 255
converted_binary[rr, cc, 0] = 255
except IndexError: # if blob found at the edge
pass
photo = ImageTk.PhotoImage(image=Image.fromarray(
converted_binary))
child_canvas.create_image(0, 0, image=photo,
anchor=tk.NW)
self.child.update()
self.counter += 1
if self.roi_found:
if self.debug_mode:
stop_searching_roi = messagebox.askyesno(
'PiVR information',
'The algorithm found something that is moving!'
'\nIf that does not look at all like your animal'
'\npress "No", to continue press "Yes"'
'\nFilled Area: '
+ repr(filled_area)[0:5]
+ '\nEccentricity: '
+ repr(eccentricity)[0:5]
+ '\nMajor over minor axis: '
+ repr(major_over_minor)[0:5]
+ '\n At Frame: ' + repr(self.counter),
parent=self.child)
#print(stop_searching_roi)
if not stop_searching_roi:
self.roi_found = False
if self.debug_mode:
self.child.grab_release()
self.child.destroy()
print('ROI defined!')
    def define_animal_mode_one(self):
"""
        This function is called when the user uses Animal
        Detection Mode #1.

        This function does not do local thresholding of the first
frame. Instead it just reconstructs the background image from
the mean image it has constructed while identifying the
animal. This will almost always leave part of the animal in
the background image. Usually this is not a problem as the
whole animal is larger than just a part of it.
"""
imageio.imsave('Background.tiff', self.unfiltered_background)
#imageio.imsave('Background.jpg', self.unfiltered_background)
self.smoothed_goodbackground = ndimage.gaussian_filter(
self.goodbackground, sigma=1)
        if RASPBERRY:
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
current_image = output.array[:, :, 0]
else:
            # if the user provided a numpy array use it
if self.images_as_npy:
current_image = self.images[:, :, self.counter]
# else use the sorted list of names that was defined with
# the user input
else:
temp = imread(self.images[self.counter])
current_image = temp[:, :, 0]
smooth_current_image = ndimage.gaussian_filter(
current_image, sigma=1)
smooth_subtracted_image = \
self.smoothed_goodbackground.astype(np.int16) - \
smooth_current_image.astype(np.int16)
std = np.std(smooth_subtracted_image)
if self.signal == 'white':
locate_thresh = np.nanmean(smooth_subtracted_image) - 2 * std
thresh_image = smooth_subtracted_image < locate_thresh
else:
locate_thresh = np.nanmean(smooth_subtracted_image) + 2 * std
thresh_image = smooth_subtracted_image > locate_thresh
animal_properties = regionprops(
label(thc.CallImageROI(thresh_image, self.first_roi).small_image))
if self.debug_mode:
print(len(animal_properties))
for special_counter in range(len(animal_properties)):
try:
print('in define animal: '
+ repr(special_counter)
+ ' Filled Area: '
+ repr(animal_properties[
special_counter].filled_area)
+ ' eccentricity: '
+ repr(animal_properties[
special_counter].eccentricity)[0:5]
+ ' major over minor: '
+ repr(animal_properties[
special_counter].major_axis_length
/animal_properties[special_counter].minor_axis_length)[0:5])
except ZeroDivisionError:
print(repr(special_counter) + ' minor axis was zero')
pass
# This is a bit confusing - this self.first_animal of Mode 1
# is never used for tracking (just for the display of the
# debug mode)
# Check all the connected regions and define the largest object as the animal
self.first_animal = thc.DescribeLargestObject(
animal_properties,
self.first_roi,
animal_like=True,
filled_area_min=self.filled_area_min,
filled_area_max=self.filled_area_max,
eccentricity_min=self.eccentricity_min,
eccentricity_max=self.eccentricity_max,
major_over_minor_axis_min=self.major_over_minor_axis_min,
major_over_minor_axis_max=self.major_over_minor_axis_max)
self.first_row_min = self.first_animal.row_min
self.first_row_max = self.first_animal.row_max
self.first_col_min = self.first_animal.col_min
self.first_col_max = self.first_animal.col_max
if self.debug_mode:
raw_image_to_plot = thc.CallImageROI(
self.smoothed_goodbackground, self.first_roi).small_image
binary_image_to_plot = thc.CallImageROI(
thresh_image, self.first_roi).small_image
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Largest object will be defined as '
'the animal')
self.child.attributes("-topmost", True) # force on top
# child_frame.pack()
label_left = tk.Label(self.child,
text='Bounding Box around animal')
label_left.grid(row=0, column=0)
child_canvas_left = tk.Canvas(
self.child,
width=raw_image_to_plot.shape[1] * self.resize,
height=raw_image_to_plot.shape[0] * self.resize)
child_canvas_left.grid(row=1, column=0)
label_right = tk.Label(self.child,
text='Binary image - largest object '
'\n will be defined as the animal')
label_right.grid(row=0, column=1)
child_canvas_right = tk.Canvas(
self.child,
width=binary_image_to_plot.shape[1] * self.resize,
height=binary_image_to_plot.shape[0] * self.resize)
child_canvas_right.grid(row=1, column=1)
# here the identified bounding box is drawn around the animal
image_object = thc.DrawBoundingBox(
self.first_image,
self.first_animal,
value=self.box_intensity)
image_to_plot = thc.CallImageROI(
image_object.image_with_box,
self.first_roi).small_image
image_to_plot = transform.resize(
image_to_plot, (int(image_to_plot.shape[0] * self.resize),
int(image_to_plot.shape[1] * self.resize)),
preserve_range=True, mode='reflect')
photo_bounding_box = ImageTk.PhotoImage(
image=Image.fromarray(image_to_plot))
child_canvas_left.create_image(
0, 0, image=photo_bounding_box, anchor=tk.NW)
converted_binary = binary_image_to_plot.astype(np.uint8)
converted_binary[np.where(converted_binary == 1)] = 255
converted_binary = transform.resize(
converted_binary, (
int(converted_binary.shape[0] * self.resize),
int(converted_binary.shape[1] * self.resize)),
preserve_range=True, mode='reflect'
)
photo_binary = ImageTk.PhotoImage(
image=Image.fromarray(converted_binary))
child_canvas_right.create_image(
0, 0, image=photo_binary, anchor=tk.NW)
self.child.update()
            # The messagebox blocks the execution of the code
            # until the user clicks OK
messagebox.showinfo('PiVR information',
'The first image using local '
'thresholding!'
'\n The algorithm will use the largest '
'object as the animal'
'\n please press "OK" to continue',
parent=self.child)
self.child.grab_release()
self.child.destroy()
print('Animal defined after frame ' + repr(self.counter))
# todo need this for VR to work - can I rename this somehow?
self.animal_after_box = thc.DescribeLargestObject(
animal_properties, self.first_roi, self.boxsize)
first_frame_data = {'filled area': float(self.animal_after_box.filled_area),
'centroid row': float(self.animal_after_box.centroid_row),
'centroid col': float(self.animal_after_box.centroid_col),
'bounding box row min': float(self.animal_after_box.row_min),
'bounding box row max': float(self.animal_after_box.row_max),
'bounding box col min': float(self.animal_after_box.col_min),
'bounding box col max': float(self.animal_after_box.col_max)
}
# save the first frame data - now as json!
with open('first_frame_data.json', 'w') as file:
json.dump(first_frame_data, file, sort_keys=True, indent=4)
if self.child_preexperiment is not None:
self.child_preexperiment.destroy()
self.animal_detected = True
    def define_animal_mode_two(self):
"""
With the information where to look we identify the animal
using local thresholding (we couldn't do that before
with the whole image)
This is only saved if the animal is somewhere where the
background illumination is relatively even and the
animal stands out clearly relative to it's immediate background!
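
        The core of the local step, sketched (``thc.CallImageROI``
        crops the binary image to the region of interest before
        labelling; the names follow the code below):

        .. code-block:: python

            from skimage.measure import label, regionprops

            small = thc.CallImageROI(binary_image, roi).small_image
            # candidate objects restricted to the ROI
            blobs = regionprops(label(small))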
"""
        # subtract the current image from the background image
first_subtracted_image = self.background_image.astype(np.int16)\
- self.first_image.astype(np.int16)
# calculate the standard deviation of the subtracted image
# over all pixels.
std = np.std(first_subtracted_image)
smoothed_image = ndimage.filters.gaussian_filter(
first_subtracted_image, sigma=std)
        # now we should have a gaussian distribution of pixel
        # intensity. The threshold is the mean of the smoothed
        # image minus/plus a given factor * the standard deviation
        # of the unsmoothed subtracted image.
if self.signal == 'white':
self.first_image_thresholded = smoothed_image \
< self.overall_threshold
else:
self.first_image_thresholded = smoothed_image \
> self.overall_threshold
# Call the binary image and identify connected regions only
# in the defined roi
animal_properties = regionprops(
label(
thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image))
if self.debug_mode:
print(len(animal_properties))
for special_counter in range(len(animal_properties)):
try:
print('in define animal: '
+ repr(special_counter)
+ ' Filled Area: '
+ repr(animal_properties[
special_counter].filled_area)
+ ' eccentricity: '
+ repr(animal_properties[
special_counter].eccentricity)[0:5]
+ ' major over minor: '
+ repr(animal_properties[
special_counter].major_axis_length
/ animal_properties[
special_counter].minor_axis_length)[0:5])
except ZeroDivisionError:
print(repr(special_counter) + ' minor axis was zero')
pass
# Check all the connected regions and define the largest
# object as the animal
self.first_animal = thc.DescribeLargestObject(
animal_properties,
self.first_roi,
animal_like=True,
filled_area_min=self.filled_area_min,
filled_area_max=self.filled_area_max,
eccentricity_min=self.eccentricity_min,
eccentricity_max=self.eccentricity_max,
major_over_minor_axis_min=self.major_over_minor_axis_min,
major_over_minor_axis_max=self.major_over_minor_axis_max)
self.first_row_min = self.first_animal.row_min
self.first_row_max = self.first_animal.row_max
self.first_col_min = self.first_animal.col_min
self.first_col_max = self.first_animal.col_max
first_frame_data = {'filled area': float(self.first_animal.filled_area),
'centroid row': float(self.first_animal.centroid_row),
'centroid col': float(self.first_animal.centroid_col),
'bounding box row min': float(self.first_animal.row_min),
'bounding box row max': float(self.first_animal.row_max),
'bounding box col min': float(self.first_animal.col_min),
'bounding box col max': float(self.first_animal.col_max)
}
# save the first frame data - now as json!
with open('first_frame_data.json', 'w') as file:
json.dump(first_frame_data, file, sort_keys=True, indent=4)
if self.debug_mode:
binary_image_to_plot = thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Largest object will be defined as '
'the animal')
self.child.attributes("-topmost", True) # force on top
child_frame = tk.Frame(
self.child,
width=(binary_image_to_plot.shape[1] * self.resize) * 2,
height=binary_image_to_plot.shape[0] * self.resize)
# child_frame.pack()
label_left = tk.Label(self.child,
text='Bounding Box around animal')
label_left.grid(row=0, column=0)
child_canvas_left = tk.Canvas(
self.child,
width=binary_image_to_plot.shape[1] * self.resize,
height=binary_image_to_plot.shape[0] * self.resize)
child_canvas_left.grid(row=1, column=0)
label_right = tk.Label(
self.child, text='Binary image - largest object'
'\n' 'will be defined as the animal')
label_right.grid(row=0, column=1)
child_canvas_right = tk.Canvas(
self.child,
width=binary_image_to_plot.shape[1] * self.resize,
height=binary_image_to_plot.shape[0] * self.resize)
child_canvas_right.grid(row=1, column=1)
# here the identified bounding box is drawn around the animal
image_object = thc.DrawBoundingBox(
self.first_image,
self.first_animal,
value=self.box_intensity)
image_to_plot = thc.CallImageROI(
image_object.image_with_box,
self.first_roi).small_image
image_to_plot = transform.resize(
image_to_plot,
(int(image_to_plot.shape[0] * self.resize),
int(image_to_plot.shape[1] * self.resize)),
preserve_range=True, mode='reflect')
photo_bounding_box = ImageTk.PhotoImage(
image=Image.fromarray(image_to_plot))
child_canvas_left.create_image(
0, 0, image=photo_bounding_box, anchor=tk.NW)
converted_binary = binary_image_to_plot.astype(np.uint8)
converted_binary[np.where(converted_binary == 1)] = 255
converted_binary = transform.resize(
converted_binary,
(int(converted_binary.shape[0] * self.resize),
int(converted_binary.shape[1] * self.resize)),
preserve_range=True,
mode='reflect'
)
photo_binary = ImageTk.PhotoImage(
image=Image.fromarray(converted_binary))
child_canvas_right.create_image(
0, 0, image=photo_binary, anchor=tk.NW)
self.child.update()
            # The messagebox blocks the execution of the code
            # until the user clicks OK
messagebox.showinfo('PiVR information',
'The first image using local thresholding!'
'\n The algorithm will use the '
'largest object as the animal'
'\n please press "OK" to continue',
parent=self.child)
self.child.grab_release()
self.child.destroy()
print('Animal defined at Frame: ' + repr(self.counter))
# todo need this for VR to work - can I rename this somehow?
self.animal_after_box = thc.DescribeLargestObject(
animal_properties, self.first_roi, self.boxsize)
if self.child_preexperiment is not None:
self.child_preexperiment.destroy()
self.animal_detected = True
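# Illustration (not part of PiVR's pipeline): the heuristic
# filtering used above - filled area, eccentricity and major
# over minor axis ratio - can be reproduced with skimage's
# regionprops. A minimal, self-contained sketch; the toy image
# and all threshold values below are made up.
@staticmethod
def _example_heuristic_blob_filter():
    import numpy as np
    from skimage.measure import label, regionprops
    # toy binary image: one elongated, animal-like blob and one speck
    binary = np.zeros((50, 50), dtype=bool)
    binary[10:14, 10:30] = True  # elongated blob
    binary[40:42, 40:42] = True  # small speck (noise)
    candidates = []
    for blob in regionprops(label(binary)):
        try:
            axis_ratio = (blob.major_axis_length
                          / blob.minor_axis_length)
        except ZeroDivisionError:
            continue  # degenerate blob, e.g. a one pixel wide line
        # made-up bounds, analogous to filled_area_min/max etc.
        if 20 <= blob.filled_area <= 500 and axis_ratio >= 2:
            candidates.append(blob)
    # define the largest surviving object as the animal
    return max(candidates, key=lambda b: b.filled_area)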
[docs] def define_animal_mode_three(self):
"""
Uses the information about which region of the image contains
the larva to apply local thresholding (this was not possible
before on the whole image).
This only works if the background illumination around the
animal is relatively even and the animal stands out clearly
relative to its immediate background!
"""
decrease_STDs = True
self.STD = 3
while decrease_STDs:
# call class MeanThresh with the very first image taken,
# the first ROI identified, the user delivered signal
# and a standard deviation (STD) of three
self.first_thresh = thc.MeanThresh(
self.first_image,
self.signal,
self.STD,
self.first_roi)
# this is the thresholded first image we'll use for
# defining the area occupied by the animal
self.first_image_thresholded = self.compare(
self.first_image, self.first_thresh.thresh)
# Call the binary image and identify connected regions
# only in the defined roi - must make sure that the
# identified blob can indeed be the animal (according to
# the heuristic rules) or if the STD needs to be
# decreased - will make detection slower, though. Maybe
# just manually decrease the STD from 3 to 2
animal_properties = regionprops(
label(
thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image
))
if len(animal_properties) > 0:
print('found at least one blob')
decrease_STDs = False
else:
self.STD -= 0.1
print('Found nothing...adjusting to '
+ repr(self.STD) + ' STDs')
if self.debug_mode:
print('animal_properties length: '
      + repr(len(animal_properties)))
for special_counter in range(len(animal_properties)):
try:
print('in define animal: '
+ repr(special_counter)
+ ' Filled Area: '
+ repr(animal_properties[
special_counter].filled_area)
+ ' eccentricity: '
+ repr(animal_properties[
special_counter].eccentricity)[0:5]
+ ' major over minor: '
+ repr(animal_properties[
special_counter].major_axis_length
/animal_properties[
special_counter].minor_axis_length)[0:5])
except ZeroDivisionError:
print(repr(special_counter) + ' minor axis was zero')
pass
# Check all the connected regions and define the largest
# object as the animal
self.first_animal = thc.DescribeLargestObject(
animal_properties,
self.first_roi,
animal_like=True,
filled_area_min=self.filled_area_min,
filled_area_max=self.filled_area_max,
eccentricity_min=self.eccentricity_min,
eccentricity_max=self.eccentricity_max,
major_over_minor_axis_min=self.major_over_minor_axis_min,
major_over_minor_axis_max=self.major_over_minor_axis_max)
# Problem: If the animal is not detected (e.g. because the
# blob that was moving before does not look like the animal
# according to the saved_variables.json defined animal) it
# won't fail here but at a later point.
# Not-so-pretty solution: Do something that will fail anyways
# later, here to make error tracing more intuitive!
if not self.first_animal.animal_like_object_detected:
    # Report the largest blob that was detected - it is the
    # best candidate that the heuristic rules rejected.
    largest_blob = max(animal_properties,
                       key=operator.attrgetter('filled_area'))
    tk.messagebox.showerror(
        'PiVR Information',
        'No animal like object could be detected!'
        '\nYou probably have an abnormal animal '
        '- either use a different animal or adjust '
        'the parameters '
        'in the "list_of_available_organisms.json"'
        '\nDetected filled area: '
        + repr(largest_blob.filled_area)
        + '\nDetected eccentricity: '
        + repr(largest_blob.eccentricity)[0:5]
        + '\nDetected Major/Minor Axis: '
        + repr(largest_blob.major_axis_length
               / largest_blob.minor_axis_length)[0:5]
        )
#print('self.first_animal.animal_like_object_detected ' +
# repr(self.first_animal.animal_like_object_detected))
first_frame_data = {'filled area': float(self.first_animal.filled_area),
'centroid row': float(self.first_animal.centroid_row),
'centroid col': float(self.first_animal.centroid_col),
'bounding box row min': float(self.first_animal.row_min),
'bounding box row max': float(self.first_animal.row_max),
'bounding box col min': float(self.first_animal.col_min),
'bounding box col max': float(self.first_animal.col_max)
}
# save the first frame data - now as json!
with open('first_frame_data.json', 'w') as file:
json.dump(first_frame_data, file, sort_keys=True, indent=4)
if self.debug_mode:
binary_image_to_plot = thc.CallImageROI(
self.first_image_thresholded, self.first_roi).small_image
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title(
'Largest object will be defined as the animal')
self.child.attributes("-topmost", True)  # force on top
child_frame = tk.Frame(
self.child,
width=(binary_image_to_plot.shape[1]*self.resize)*2,
height=binary_image_to_plot.shape[0]*self.resize)
#child_frame.pack()
label_left = tk.Label(self.child,
                      text='Bounding Box around animal')
label_left.grid(row=0, column=0)
child_canvas_left = tk.Canvas(
    self.child,
    width=binary_image_to_plot.shape[1]*self.resize,
    height=binary_image_to_plot.shape[0]*self.resize)
child_canvas_left.grid(row=1, column=0)
label_right = tk.Label(self.child,
                       text='Binary image - largest object '
                            '\n will be defined as the animal')
label_right.grid(row=0, column=1)
child_canvas_right = tk.Canvas(
    self.child,
    width=binary_image_to_plot.shape[1]*self.resize,
    height=binary_image_to_plot.shape[0]*self.resize)
child_canvas_right.grid(row=1, column=1)
# here the identified bounding box is drawn around the animal
image_object = thc.DrawBoundingBox(
self.first_image,
self.first_animal,
value=self.box_intensity)
image_to_plot = thc.CallImageROI(
image_object.image_with_box,
self.first_roi).small_image
image_to_plot = transform.resize(
image_to_plot, (
int(image_to_plot.shape[0]*self.resize),
int(image_to_plot.shape[1]*self.resize)),
preserve_range=True,
mode='reflect')
photo_bounding_box = ImageTk.PhotoImage(
image=Image.fromarray(image_to_plot))
child_canvas_left.create_image(
0, 0, image=photo_bounding_box, anchor=tk.NW)
converted_binary = binary_image_to_plot.astype(np.uint8)
converted_binary[np.where(converted_binary == 1)] = 255
converted_binary = transform.resize(
converted_binary, (
int(converted_binary.shape[0]*self.resize),
int(converted_binary.shape[1]*self.resize)),
preserve_range=True,
mode='reflect'
)
photo_binary = ImageTk.PhotoImage(
image=Image.fromarray(converted_binary))
child_canvas_right.create_image(
0, 0, image=photo_binary, anchor=tk.NW)
self.child.update()
# The messagebox blocks the execution of the code until
# the user clicks "OK"
messagebox.showinfo(
'PiVR information',
'The first image using local thresholding!'
'\nThe algorithm will use the largest object as the animal'
'\nplease press "OK" to continue'
'\nFollowing identified object has the following parameters:'
'\nFilled Area: '
+ repr(self.first_animal.filled_area)[0:5]
+ '\nMajor over Minor Axis: '
+ repr(
self.first_animal.major_axis
/self.first_animal.minor_axis)[0:5]
+ '\nEccentricity: ' + repr(
self.first_animal.eccentricity)[0:5],
parent=self.child)
self.child.grab_release()
self.child.destroy()
print('Animal defined at Frame: ' + repr(self.counter))
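# Illustration (not part of PiVR's pipeline): the while loop
# above keeps lowering the number of standard deviations until
# at least one connected region survives thresholding. A
# numpy-only sketch of the same idea; the exact formula inside
# thc.MeanThresh is an assumption here (mean - k * std for a
# dark animal on a bright background).
@staticmethod
def _example_adaptive_std_threshold(image):
    from skimage.measure import label, regionprops
    k = 3.0
    while k > 0:
        thresh = image.mean() - k * image.std()
        binary = image < thresh  # dark signal
        if regionprops(label(binary)):  # found at least one blob
            return binary, k
        k -= 0.1  # relax the threshold and try again
    return image < image.mean(), 0.0  # fallback: plain mean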
[docs] def animal_left_mode_three(self):
"""
This function compares the first image the algorithm has
taken with the current image. It XORs the two binary images
(each thresholded using the mean +/- 3*STD).
The idea is that as soon as the animal has left its original
position, the subtracted image will contain only the animal
at its original position. In other words, the closer this
subtracted image is to the first binary image, the more of
the animal has already left the initial bounding box.
"""
# next stop: take background picture for future reference
iscrossing = False
if self.debug_mode:
# get size of image by taking the image
first_binary = thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image.copy()
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Observe the difference between the '
'left and middle image')
self.child.attributes("-topmost", True) # force on top
child_frame = tk.Frame(
self.child,
width=first_binary.shape[1] * self.resize * 3,
height=first_binary.shape[0] * self.resize)
# some text
label_left = tk.Label(self.child,
text='The first image in binary form')
label_left.grid(row=0, column=0)
child_canvas_left = tk.Canvas(
self.child,
width=first_binary.shape[1] * self.resize,
height=first_binary.shape[0] * self.resize)
child_canvas_left.grid(row=1, column=0)
converted_first_binary = first_binary.astype(np.uint8)
# change the 1 to 255
converted_first_binary[np.where(
converted_first_binary == 1)] = 255
# now we resize the image
converted_first_binary = \
transform.resize(converted_first_binary,
(converted_first_binary.shape[0] * self.resize,
converted_first_binary.shape[1] * self.resize),
preserve_range=True, mode='reflect'
)
# Use PIL to make an image Tkinter is able to understand
first_binary_photo = ImageTk.PhotoImage(
image=Image.fromarray(converted_first_binary))
# use that photo to draw in the canvas that was just created
child_canvas_left.create_image(
0, 0, image=first_binary_photo, anchor=tk.NW)
label_middle = tk.Label(
self.child, text='The current image in binary form')
label_middle.grid(row=0, column=1)
label_right = tk.Label(
self.child, text='The subtracted images')
label_right.grid(row=0, column=2)
# prepare the other two canvases:
child_canvas_middle = tk.Canvas(
self.child,
width=first_binary.shape[1] * self.resize,
height=first_binary.shape[0] * self.resize)
child_canvas_middle.grid(row=1, column=1)
child_canvas_right = tk.Canvas(
self.child,
width=first_binary.shape[1] * self.resize,
height=first_binary.shape[0] * self.resize)
child_canvas_right.grid(row=1, column=2)
self.child.update()
i_escape_box = self.counter + 1
while not iscrossing \
and not self.cancel_animal_detection_bool:
self.cancel_button.update()
if RASPBERRY and not self.offline_analysis:
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
current_frame_animal_left = output.array[:, :, 0]
else:
self.counter += 1
if self.images_as_npy:
#current_frame_animal_left = \
# ndimage.filters.gaussian_filter(
# self.images[:,:,self.counter],
# sigma=self.sigma_for_image_filtering)
current_frame_animal_left = \
self.images[:,:,self.counter]
else:
temp = imread(self.images[self.counter])
#current_frame_animal_left = \
# ndimage.filters.gaussian_filter(temp[:,:,0],
# sigma=self.sigma_for_image_filtering)
current_frame_animal_left = temp[:,:,0]
#print(self.counter)
# re-calculate the threshold for this particular image: -
# doesn't work well because changing the threshold
# changes what is identified as the first animal and
# therefore makes it much harder to compare the current
# image to the previous one.
#leaving_box_thresh = \
# thc.MeanThresh(current_frame_animal_left,
# self.signal,
# self.STD,
# self.first_roi)
# depending on the signal (dark or bright) take the
# values less than the threshold, or greater, respectively
# thresholded_escape_box_image = \
# self.compare(
# current_frame_animal_left,
# leaving_box_thresh.thresh)
thresholded_escape_box_image = self.compare(
current_frame_animal_left, self.first_thresh.thresh)
# Essentially subtract the current binary image from the
# first binary image
subtracted_new_frame = np.bitwise_xor(
thresholded_escape_box_image, self.first_image_thresholded)
# extract the properties of object in the bounding box
larval_properties = regionprops(
label(
thc.CallImageROI(
subtracted_new_frame,
self.first_animal).small_image))
# sort those object and find the largest one.
boxed_animal = thc.DescribeLargestObject(
larval_properties, self.first_animal)
# the if clause below just tests if the connected regions
# that were identified have similar properties
# to the original larva.
size_matches = (abs(boxed_animal.filled_area
                    - self.first_animal.filled_area)
                <= self.first_animal.filled_area
                * self.stringency_size)
row_matches = (abs(boxed_animal.centroid_row
                   - self.first_animal.centroid_row)
               <= self.first_animal.centroid_row
               * self.stringency_centroid)
col_matches = (abs(boxed_animal.centroid_col
                   - self.first_animal.centroid_col)
               <= self.first_animal.centroid_col
               * self.stringency_centroid)
if size_matches and row_matches and col_matches:
iscrossing = True
# if the subtracted binary image is almost identical
# (controlled with the stringency* parameters)
# the animal has left the original position.
break
else:
i_escape_box += 1
if self.debug_mode:
current_image = thc.CallImageROI(
thresholded_escape_box_image,
self.first_roi).small_image.copy()
current_binary = current_image.astype(np.uint8)
# change the 1 to 255
current_binary[np.where(current_binary == 1)] = 255
# now we resize the image
current_binary = transform.resize(
    image=current_binary,
    output_shape=(current_binary.shape[0] * self.resize,
                  current_binary.shape[1] * self.resize),
    preserve_range=True,
    mode='reflect')
# Use PIL to make an image Tkinter is able to understand
current_photo = ImageTk.PhotoImage(
image=Image.fromarray(current_binary))
# use that photo to draw in the canvas that was just created
child_canvas_middle.create_image(
0, 0, image=current_photo, anchor=tk.NW)
subtracted_image = thc.CallImageROI(
subtracted_new_frame,
self.first_roi).small_image.copy()
subtracted_binary = subtracted_image.astype(np.uint8)
subtracted_binary[np.where(
subtracted_binary == 1)] = 255
subtracted_binary = transform.resize(
subtracted_binary,
(subtracted_binary.shape[0] * self.resize,
subtracted_binary.shape[1] * self.resize),
preserve_range=True, mode='reflect'
)
subtracted_photo = ImageTk.PhotoImage(
image=Image.fromarray(subtracted_binary))
child_canvas_right.create_image(
0, 0, image=subtracted_photo, anchor=tk.NW)
self.child.update()
if not RASPBERRY:
# wait for the display framerate
self.child.after(int(self.interval))
else:
self.child.after(int(200))
if self.debug_mode:
messagebox.showinfo('PiVR information',
'To continue, please click "OK"',
parent=self.child)
self.child.grab_release()
self.child.destroy()
self.next_frame = i_escape_box
if self.debug_mode:
# grab the first image to plot - needed to define the
# size of the window
first_binary = thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image.copy()
# how much bigger should the image be?
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Animal left original location')
self.child.attributes("-topmost", True) # force on top
# create the frame where we can put the canvases inside,
# idea is to have a 2x2 matrix to add the canvases
child_frame = tk.Frame(
self.child,
width=(first_binary.shape[1]*self.resize)*2,
height=(first_binary.shape[0]*self.resize)*2)
# some text
label_left_top = tk.Label(
self.child, text='The first image in binary form')
label_left_top.grid(row=0, column=0)
# the first canvas, as big as the image * resize_factor
child_canvas_left_top = tk.Canvas(
self.child,
width=first_binary.shape[1]*self.resize,
height=first_binary.shape[0]*self.resize)
child_canvas_left_top.grid(row=1, column=0)
#thc.CallImageROI(self.first_image_thresholded,
# self.first_roi).small_image # already did that above
# Do some datatype gymnastic for tkinter to be able to plot:
# convert the binary image to int8
converted_first_binary = first_binary.astype(np.uint8)
# change the 1 to 255
converted_first_binary[np.where(converted_first_binary==1)] = 255
# now we resize the image
converted_first_binary = \
transform.resize(
converted_first_binary,
(converted_first_binary.shape[0]*self.resize,
converted_first_binary.shape[1]*self.resize),
preserve_range=True,
mode='reflect')
# Use PIL to make an image Tkinter is able to understand
first_binary_photo = ImageTk.PhotoImage(
image=Image.fromarray(converted_first_binary))
# use that photo to draw in the canvas that was just created
child_canvas_left_top.create_image(
0, 0, image=first_binary_photo, anchor=tk.NW)
label_right_top = tk.Label(
self.child,
text='The current image in binary form')
label_right_top.grid(row=0, column=1)
# Now the current binary image is called
current_binary = thc.CallImageROI(
thresholded_escape_box_image,
self.first_roi).small_image.copy()
child_canvas_right_top = tk.Canvas(
self.child,
width=current_binary.shape[1]*self.resize,
height=current_binary.shape[0]*self.resize)
child_canvas_right_top.grid(row=1, column=1)
converted_current_binary = current_binary.astype(np.uint8)
converted_current_binary[np.where(
converted_current_binary==1)] = 255
converted_current_binary = \
transform.resize(
converted_current_binary,
(converted_current_binary.shape[0]*self.resize,
converted_current_binary.shape[1]*self.resize),
preserve_range=True, mode='reflect'
)
current_binary_photo = ImageTk.PhotoImage(
image=Image.fromarray(converted_current_binary))
child_canvas_right_top.create_image(
0, 0, image=current_binary_photo, anchor=tk.NW)
label_left_bottom = tk.Label(
self.child, text='The subtracted image')
label_left_bottom.grid(row=2, column=0)
subtracted_binary = thc.CallImageROI(
subtracted_new_frame,
self.first_roi).small_image.copy()
child_canvas_left_bottom = tk.Canvas(
self.child,
width=subtracted_binary.shape[1]*self.resize,
height=subtracted_binary.shape[0]*self.resize)
child_canvas_left_bottom.grid(row=3, column=0)
converted_subtracted_binary = \
subtracted_binary.astype(np.uint8)
converted_subtracted_binary[np.where(
converted_subtracted_binary==1)] = 255
converted_subtracted_binary = transform.resize(
converted_subtracted_binary,
(converted_subtracted_binary.shape[0]*self.resize,
converted_subtracted_binary.shape[1]*self.resize),
preserve_range=True, mode='reflect')
current_subtracted_photo = ImageTk.PhotoImage(
image=Image.fromarray(converted_subtracted_binary))
child_canvas_left_bottom.create_image(
0, 0, image=current_subtracted_photo, anchor=tk.NW)
label_right_bottom = tk.Label(
self.child,
text='Check if the box is empty!'
'\n if not, you have to increase stringency')
label_right_bottom.grid(row=2, column=1)
boxed_image_full = thc.DrawBoundingBox(
current_frame_animal_left,
self.first_animal,
value=self.box_intensity)
boxed_image = thc.CallImageROI(
boxed_image_full.image_with_box,
self.first_roi).small_image
child_canvas_right_bottom = tk.Canvas(
self.child,
width=boxed_image.shape[1]*self.resize,
height=boxed_image.shape[0]*self.resize)
child_canvas_right_bottom.grid(row=3, column=1)
boxed_image = transform.resize(
boxed_image,
(boxed_image.shape[0]*self.resize,
boxed_image.shape[1]*self.resize),
preserve_range=True, mode='reflect'
)
boxed_image_photo = ImageTk.PhotoImage(
image=Image.fromarray(boxed_image))
child_canvas_right_bottom.create_image(
0, 0, image=boxed_image_photo, anchor=tk.NW)
self.child.update()
messagebox.showinfo('PiVR information',
'To continue, '
'please click "OK"',
parent=self.child)
self.child.grab_release()
self.child.destroy()
print('Animal left initial bounding box')
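# Illustration (not part of PiVR's pipeline): the escape test
# above boils down to XOR-ing the first binary frame with the
# current one and checking whether the blob inside the original
# bounding box still matches the first animal. A toy sketch of
# the XOR step; the arrays below are made up.
@staticmethod
def _example_xor_escape_test():
    import numpy as np
    first = np.zeros((8, 8), dtype=bool)
    first[2:4, 2:4] = True  # animal at its start position
    current = np.zeros((8, 8), dtype=bool)
    current[5:7, 5:7] = True  # animal after moving away
    # once the animal has fully left, the XOR image contains the
    # animal footprint at both the old and the new position
    difference = np.bitwise_xor(first, current)
    # inside the old bounding box the XOR equals the first
    # frame - exactly the condition the stringency checks probe
    return np.array_equal(difference[2:4, 2:4], first[2:4, 2:4])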
[docs] def background_reconstruction_mode_three(self):
"""
After the animal left the original position, take another
picture. Use the bounding box coordinates defined for the first
animal to cut out that area of the new image. Then paste it
into the original position where the animal was.
This leads to an almost perfect background image.
"""
# TODO: Test if it would be good to have a small pause here -
# this would give the animal time to really get out of the
# initial bounding box.
self.next_frame += 1
# now we reconstruct the background
if RASPBERRY:
with PiYUVArray(self.camera) as output:
self.camera.capture(output, 'yuv', use_video_port=True)
self.post_box_image = output.array[:,:,0]
else:
self.counter += 1
# if not on the Raspberry or if offline analysis, take
# the pre-recorded images
# if the user provided a numpy array, use it
if self.images_as_npy:
self.post_box_image = self.images[:, :, self.counter]
# else use the sorted list of names that was
# defined with the user input
else:
temp = imread(self.images[self.counter])
self.post_box_image = temp[:, :, 0]
# Here we take the picture that has been taken after the
# larvae left the initial bbox and fill the original picture
# with the now empty bounding box to get a background without
# a larva
# Note! We make the picture 10% of the boxsize bigger than
# the found regionprops. Sometimes the thresholding does not
# catch faint parts of the larva so this is a safeguard
# against it!
# IT WILL BREAK IF THE BOXSIZE IS TOO BIG
pad = int(self.boxsize * 0.1)
box_rows = slice(self.first_animal.row_min - pad,
                 self.first_animal.row_max + pad)
box_cols = slice(self.first_animal.col_min - pad,
                 self.first_animal.col_max + pad)
self.goodbackground[box_rows, box_cols] = \
    self.post_box_image[box_rows, box_cols]
# also do this for the unfiltered background - just to get a
# nicer looking background image
self.unfiltered_background[box_rows, box_cols] = \
    self.post_box_image[box_rows, box_cols]
self.smoothed_goodbackground = \
ndimage.filters.gaussian_filter(self.goodbackground, sigma=1)
if self.debug_mode:
# get size of image by referencing the image we want to plot
background_reconstructed_zoom = thc.CallImageROI(
self.smoothed_goodbackground, self.first_roi).small_image
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title('Reconstructed image without animal '
                    '\nif you see the animal that means '
                    'that the \nbackground reconstruction '
                    'did not work well')
self.child.attributes("-topmost", True) # force on top
child_frame = tk.Frame(
self.child,
width=background_reconstructed_zoom.shape[1] * self.resize,
height=background_reconstructed_zoom.shape[0] * self.resize)
# some text
tk_label = tk.Label(self.child,
text='Background image zoom')
tk_label.grid(row=0, column=0)
child_canvas = tk.Canvas(
self.child,
width=background_reconstructed_zoom.shape[1] * self.resize,
height=background_reconstructed_zoom.shape[0] * self.resize)
child_canvas.grid(row=1, column=0)
background_reconstructed_zoom = transform.resize(
background_reconstructed_zoom,
(background_reconstructed_zoom.shape[0]*self.resize,
background_reconstructed_zoom.shape[1]*self.resize),
preserve_range=True, mode='reflect'
)
photo = ImageTk.PhotoImage(
image=Image.fromarray(background_reconstructed_zoom))
child_canvas.create_image(0, 0, image=photo, anchor=tk.NW)
messagebox.showinfo('PiVR information',
'To continue, please click "OK"',
parent=self.child)
self.child.grab_release()
self.child.destroy()
imageio.imsave('Background.tiff', self.unfiltered_background)
#imageio.imsave('Background.jpg', self.unfiltered_background)
print('Background reconstruction completed and image saved')
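# Illustration (not part of PiVR's pipeline): background
# reconstruction in a nutshell - cut the padded bounding box the
# animal used to occupy out of a later frame and paste it into
# the first frame. All arrays and box coordinates below are
# made up.
@staticmethod
def _example_background_patch():
    import numpy as np
    first_frame = np.full((100, 100), 120, dtype=np.uint8)
    first_frame[40:50, 40:50] = 30  # dark animal in the first frame
    later_frame = np.full((100, 100), 120, dtype=np.uint8)  # animal gone
    pad = 2  # safeguard against faint animal edges, as above
    rows = slice(40 - pad, 50 + pad)
    cols = slice(40 - pad, 50 + pad)
    background = first_frame.copy()
    background[rows, cols] = later_frame[rows, cols]
    return background  # animal-free background image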
[docs] def animal_after_box_mode_three(self):
"""
After making sure that the animal left the original position,
we have to find it again.
"""
smoothed_current_local_image = \
ndimage.filters.gaussian_filter(
thc.CallImageROI(
self.post_box_image,
self.first_roi,
self.boxsize
).small_image, sigma=1)
smoothed_background_local_image = \
thc.CallImageROI(
self.smoothed_goodbackground,
self.first_roi,
self.boxsize).small_image
subtracted_current_frame = \
smoothed_current_local_image.astype(np.int16) - \
smoothed_background_local_image.astype(np.int16)
local_thresh = thc.MeanThresh(
subtracted_current_frame, self.signal, 3)
if len(regionprops(label(subtracted_current_frame
> local_thresh.thresh))) == 0:
# In case there are zero connected pixels, we assume
# that's because the 3 STDs are too much and go with
# only two
local_thresh = thc.MeanThresh(
subtracted_current_frame, self.signal, 2)
# this is the thresholded first image we'll use for defining
# the area occupied by the animal
current_image_thresholded = self.compare(
subtracted_current_frame, local_thresh.thresh)
larval_properties_after_moved = regionprops(
label(current_image_thresholded))
self.animal_after_box = thc.DescribeLargestObject(
larval_properties_after_moved,
self.first_roi,
self.boxsize)
animal_left_box_data = {
'filled area': float(self.animal_after_box.filled_area),
'centroid row': float(self.animal_after_box.centroid_row),
'centroid col': float(self.animal_after_box.centroid_col),
'bounding box row min': float(self.animal_after_box.row_min),
'bounding box row max': float(self.animal_after_box.row_max),
'bounding box col min': float(self.animal_after_box.col_min),
'bounding box col max': float(self.animal_after_box.col_max)
}
# save the first frame data - now as json!
with open('second_frame_data.json', 'w') as file:
json.dump(animal_left_box_data, file, sort_keys=True, indent=4)
if self.debug_mode:
# get size of image by referencing the image we want to plot
post_box_image = thc.DrawBoundingBox(
self.post_box_image,
self.animal_after_box,
value=self.box_intensity)
post_box_image = thc.CallImageROI(
post_box_image.image_with_box,
self.first_roi).small_image
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title(
    'Animal re-detected after leaving its initial position'
    '\nthe bounding box should surround the animal'
    '\nat its new location')
self.child.attributes("-topmost", True) # force on top
child_frame = tk.Frame(
self.child,
width=post_box_image.shape[1] * self.resize,
height=post_box_image.shape[0] * self.resize)
# some text
tk_label = tk.Label(self.child,
                    text='Current image with bounding box')
tk_label.grid(row=0, column=0)
child_canvas = tk.Canvas(
self.child,
width=post_box_image.shape[1] * self.resize,
height=post_box_image.shape[0] * self.resize)
child_canvas.grid(row=1, column=0)
post_box_image = transform.resize(
post_box_image,
(post_box_image.shape[0]*self.resize,
post_box_image.shape[1]*self.resize),
preserve_range=True, mode='reflect'
)
photo = ImageTk.PhotoImage(
image=Image.fromarray(post_box_image))
child_canvas.create_image(
0, 0, image=photo, anchor=tk.NW)
messagebox.showinfo('PiVR information',
'To continue, please click "OK"',
parent=self.child)
self.child.grab_release()
self.child.destroy()
if self.child_preexperiment is not None:
self.child_preexperiment.destroy()
self.animal_detected = True
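# Illustration (not part of PiVR's pipeline): both
# first_frame_data.json and second_frame_data.json use the same
# flat key/value layout, so downstream analysis can read them
# back directly. A minimal sketch; the path argument depends on
# the experiment folder.
@staticmethod
def _example_read_frame_data(path='first_frame_data.json'):
    import json
    with open(path) as file:
        frame_data = json.load(file)
    # e.g. reconstruct the bounding box of the detected animal
    bbox = (frame_data['bounding box row min'],
            frame_data['bounding box row max'],
            frame_data['bounding box col min'],
            frame_data['bounding box col max'])
    return frame_data['filled area'], bbox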
[docs] def define_animal_post_hoc(self):
"""
With the information about where to look, the animal can be
identified using local thresholding.
"""
# subtract the current image from the mean image - while
# images are uint8 (values go from 0 to 255) it becomes
# necessary to work in a larger number space as subtraction
# can lead to integer underflow (e.g. 100 - 250 wraps around
# to 106 instead of -150).
# Int16 solves this problem
first_subtracted_image = \
self.mean_image.astype(np.int16) \
- self.images[:, :, self.counter].astype(np.int16)
# Previously, sigma was set to the standard deviation of the
# image. If the background changes over time that breaks the
# script here! As this is only for post-hoc analysis, and in
# any normal experiment sigma is expected to be close to zero
# (in an ideal world), sigma is set to 1 from now on.
smoothed_image = ndimage.filters.gaussian_filter(
first_subtracted_image, sigma=1)
# the pixel intensities of the smoothed, subtracted image
# should now be approximately Gaussian distributed. Compare it
# against the precomputed overall threshold; the direction of
# the comparison depends on whether the signal is bright or dark.
if self.signal == 'white':
self.first_image_thresholded = smoothed_image \
< self.overall_threshold
else:
self.first_image_thresholded = smoothed_image \
> self.overall_threshold
# Call the binary image and identify connected regions only
# in the defined roi
animal_properties = regionprops(
label(
thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image
))
if self.debug_mode:
print(len(animal_properties))
for special_counter in range(len(animal_properties)):
try:
print('in define animal: '
+ repr(special_counter)
+ ' Filled Area: '
+ repr(animal_properties[
special_counter].filled_area)
+ ' eccentricity: '
+ repr(animal_properties[
special_counter].eccentricity)[0:5]
+ ' major over minor: '
+ repr(animal_properties[
special_counter].major_axis_length
/animal_properties[
special_counter].minor_axis_length)[0:5])
except ZeroDivisionError:
print(repr(special_counter) + ' minor axis was zero')
pass
# Check all the connected regions and define the largest
# object as the animal
self.first_animal = thc.DescribeLargestObject(
animal_properties,
self.first_roi,
animal_like=False,
filled_area_min=self.filled_area_min,
filled_area_max=self.filled_area_max,
eccentricity_min=self.eccentricity_min,
eccentricity_max=self.eccentricity_max,
major_over_minor_axis_min=self.major_over_minor_axis_min,
major_over_minor_axis_max=self.major_over_minor_axis_max
)
self.first_row_min = self.first_animal.row_min
self.first_row_max = self.first_animal.row_max
self.first_col_min = self.first_animal.col_min
self.first_col_max = self.first_animal.col_max
first_frame_data = {'filled area': float(self.first_animal.filled_area),
'centroid row': float(self.first_animal.centroid_row),
'centroid col': float(self.first_animal.centroid_col),
'bounding box row min': float(self.first_animal.row_min),
'bounding box row max': float(self.first_animal.row_max),
'bounding box col min': float(self.first_animal.col_min),
'bounding box col max': float(self.first_animal.col_max)
}
# save the first frame data - now as json!
with open('first_frame_data.json', 'w') as file:
json.dump(first_frame_data, file, sort_keys=True, indent=4)
if self.debug_mode:
binary_image_to_plot = thc.CallImageROI(
self.first_image_thresholded,
self.first_roi).small_image
self.child = tk.Toplevel()
self.child.grab_set()
self.child.wm_title(
'Largest object will be defined as the animal')
self.child.attributes("-topmost", True)  # force window on top
child_frame = tk.Frame(
self.child,
width=(binary_image_to_plot.shape[1] * self.resize) * 2,
height=binary_image_to_plot.shape[0] * self.resize)
# child_frame.pack()
label_left = tk.Label(self.child, text='Bounding Box '
'around animal')
label_left.grid(row=0, column=0)
child_canvas_left = tk.Canvas(
self.child,
width=binary_image_to_plot.shape[1] * self.resize,
height=binary_image_to_plot.shape[0] * self.resize)
child_canvas_left.grid(row=1, column=0)
label_right = tk.Label(self.child,
text='Binary image - largest object '
'\n will be defined as the animal')
label_right.grid(row=0, column=1)
child_canvas_right = tk.Canvas(
self.child,
width=binary_image_to_plot.shape[1] * self.resize,
height=binary_image_to_plot.shape[0] * self.resize)
child_canvas_right.grid(row=1, column=1)
# here the identified bounding box is drawn
# around the animal
image_object = thc.DrawBoundingBox(
self.first_image,
self.first_animal,
value=self.box_intensity)
image_to_plot = thc.CallImageROI(
image_object.image_with_box,
self.first_roi).small_image
image_to_plot = transform.resize(
image_to_plot, (int(image_to_plot.shape[0] * self.resize),
int(image_to_plot.shape[1] * self.resize)),
preserve_range=True, mode='reflect')
photo_bounding_box = ImageTk.PhotoImage(
image=Image.fromarray(image_to_plot))
child_canvas_left.create_image(
0, 0, image=photo_bounding_box, anchor=tk.NW)
converted_binary = binary_image_to_plot.astype(np.uint8)
converted_binary[np.where(converted_binary == 1)] = 255
converted_binary = transform.resize(
converted_binary, (int(converted_binary.shape[0] * self.resize),
int(converted_binary.shape[1] * self.resize)),
preserve_range=True, mode='reflect'
)
photo_binary = ImageTk.PhotoImage(
image=Image.fromarray(converted_binary))
child_canvas_right.create_image(
0, 0, image=photo_binary, anchor=tk.NW)
self.child.update()
# The messagebox blocks the execution of the code until
# the user clicks "OK"
messagebox.showinfo('PiVR information',
'The first image using local '
'thresholding!'
'\n The algorithm will use the '
'largest object as the animal'
'\n please press "OK" to continue',
parent=self.child)
self.child.grab_release()
self.child.destroy()
self.animal_detected = True
print('Animal defined')
print('counter ' + repr(self.counter))
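# Illustration (not part of PiVR's pipeline): why the int16
# cast above matters - subtracting uint8 images directly wraps
# around instead of producing negative values. Self-contained
# demonstration with made-up pixel values.
@staticmethod
def _example_signed_subtraction():
    import numpy as np
    mean_img = np.array([100], dtype=np.uint8)
    current_img = np.array([250], dtype=np.uint8)
    wrapped = mean_img - current_img  # uint8 wraps around to 106
    signed = (mean_img.astype(np.int16)
              - current_img.astype(np.int16))  # int16 keeps -150
    return int(wrapped[0]), int(signed[0])  # (106, -150)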
[docs] def error_message_pre_exp_func(self, error_stack):
"""
Whenever something goes wrong during animal detection this
function is called. It writes the traceback of the
error into a file called _ERROR_animal_detection.txt in the
experimental folder.
"""
if self.child_preexperiment is not None:
self.child_preexperiment.destroy()
with open(self.datetime
+ '_ERROR_animal_detection.txt', 'a') as file:
file.write('Unexpected error while detecting the animal'
'\n\n'
'Animal detection mode used: '
+ self.animal_detection_mode +
'\n\n')
file.write('Traceback (most recent call last): ' +
str(error_stack) +
'\n\n')
if "ValueError: zero-size array to reduction operation" \
"maximum which has no identity" in traceback.format_exc()\
and "larval_properties = regionprops(label(" \
"thc.CallImageROI(subtracted_new_frame, " \
"self.first_animal).small_image))" \
in traceback.format_exc() \
and not self.first_animal.animal_like_object_detected:
file.write('Error #3\n'
'The following 2 lines:\n'
"ValueError: zero-size array to reduction "
"operation maximum which has no identity\n"
"larval_properties = regionprops(label("
"thc.CallImageROI(subtracted_new_frame, "
"self.first_animal).small_image))\n"
'and the fact that the variable '
'"self.first_animal.animal_like_object_detected" '
'is set to "False"\n'
'indicate that no animal could be '
'identified in the function '
'"define_animal_mode_three".\n'
'There are at least three possible reasons '
'for this:\n'
'1) If you are using established organism '
'parameters it likely means that the animal\n'
'could not be separated from the background. '
'This can happen if the animal is close to\n'
'the edge of the arena or if the arena is '
'not clear.\n'
'To solve this either re-design your arena '
'or use Animal Detection Mode 1\n'
'(or possibly Mode 2)\n'
'2) The pixel/mm factor is incorrectly '
'defined\n'
'3) You are in the process of defining '
'heuristics for a new animal and you need to adjust '
'those further\n')
# elif.... put all the known errors and print possible solutions
else:
file.write('Error that has not been classified yet!\n\n')
file.write('Full error traceback below: \n\n')
file.write(traceback.format_exc())
tk.messagebox.showerror(
'Error',
'There was an error while identifying the animal.\n'
'See the ERROR_animal_detection.txt file in the\n'
'experiment folder for a detailed traceback for\n'
'debugging purposes\n'
)
if self.debug_mode:
self.child.grab_release()
self.child.destroy()
if RASPBERRY and not self.post_hoc_tracking:
self.child_preexperiment.grab_release()
self.child_preexperiment.destroy()
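# Illustration (not part of PiVR's pipeline): the logging
# pattern used above - catch, format with traceback.format_exc()
# and append to a plain text file - in isolation. The file name
# and the provoked error below are made up.
@staticmethod
def _example_log_traceback(path='_ERROR_example.txt'):
    import traceback
    try:
        raise ValueError('example error for demonstration')
    except ValueError:
        with open(path, 'a') as file:
            file.write('Full error traceback below: \n\n')
            file.write(traceback.format_exc())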
[docs] def cancel_animal_detection_func(self):
"""
When the user presses the cancel button, this sets the Boolean
flag to True, which cancels the detection of the animal at the
next valid step.
"""
self.cancel_animal_detection_bool = True
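# Illustration (not PiVR's actual GUI wiring): the cancel
# mechanism is a plain polled flag - the button callback only
# flips a Boolean, and the detection loop checks it on every
# iteration (see the while loop in animal_left_mode_three).
# The loop below is a made-up stand-in for the detection loop.
@staticmethod
def _example_cancel_flag_loop():
    cancelled = {'flag': False}
    def on_cancel():  # would be wired up as the button command
        cancelled['flag'] = True
    for step in range(1000):  # stands in for the detection loop
        if cancelled['flag']:
            break  # abort at the next valid step
        if step == 3:
            on_cancel()  # simulate the user clicking cancel
    return step  # the loop stopped at step 4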