__author__ = 'David Tadres'
__project__ = 'PiVR'
import numpy as np
import tkinter as tk
import os
from matplotlib.figure import Figure
from scipy import ndimage
import matplotlib.backends.backend_tkagg as tkagg
import time
from tkinter import messagebox
from skimage import measure
from skimage.draw import line
from matplotlib.patches import Rectangle
from matplotlib import cm
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import pandas as pd
import json
# Local Modules
import initialize_image_data
large_plots = False
[docs]class MultiAnimalTracking():
"""
The Multi-Animal Tracker allows the identification and
tracking of several animals in a video or image series.
This tracker depends on user input, specifically:
#. The user should identify the region in the frame where the
animals are to be expected. This helps reduce
mis-identification of structures outside that area as
animals.
#. The user should optimize the detection by using the
"Threshold (STDs from Mean)" slider. When doing background
subtraction, the current image is subtracted from the mean
image. The threshold defined using this slider determines how
many standard deviations (e.g. 5 x Standard Deviation)
from the mean value of pixel intensities of the subtracted
image the animals are expected. In other words - the
clearer your animals stand out (large contrast) the higher
the threshold can be set.
#. The "Minimum filled area" slider gives the user a handle
on the animal size: After background subtraction and
applying the threshold (see above) the algorithm goes
through all the
`"blobs" <https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops>`_.
To determine whether a given blob counts as an animal it
compares the number of pixels it contains to this
Minimum filled area. A blob will only count as an animal
if it contains equal or more pixels as defined here.
#. The "Maximum filled area" slider gives the user a handle
on the animal size by defining the maximum area (in
pixels) the animal has (see above).
#. The "Major over Minor Axis" slider lets the user select
for "elongated" objects. The Major and Minor axis are
properties of the
`"blob" <https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops>`_.
For animals that are often round (such as fruit fly larva)
it is best to keep this parameter at zero. For animals
that are rigid such as adult fruit flies, it can be useful
to set this slider to a number higher than one.
#. The "Max Speed Animal [mm/s]" is used during tracking to
define realistic travelled distances between two frames.
To calculate this, the script takes the pixel/mm and the
frame rate as recorded in "experiment_settings.json" into
account.
For example, if you have a fruit fly larva that
moves not faster than 2mm/s and you have recorded a video
at 5 frames per second at a distance (camera to animals)
translating to 5pixel/mm at your chosen resolution a blob
can not move more than (2mm/s*5pixel/mm)/5 frames per
second = 2 pixel per frame.
.. warning::
This feature can lead to unexpected results. If your
trajectories look unexpected, try relaxing this parameter
(=put a large number, e.g. 200)
#. The "Select Rectangular ROI" is a important feature: it
allows the selection of a rectangular area using the mouse in
the main window. When looking for animals, only the area
inside this area is taken into consideration.
#. The main window displays the current frame defined by pulling
the slider next to "Start Playing". This can be used to
optimize the "Image parameters" described above. To just watch
the video you can of course also press the "Start Playing"
button.
The multi-animal tracking algorithm critically depends on the optimal
image parameters which means that for optimal results **each
frame should contain the expected number of animals**. For
example, if you are running an experiment with 5 animals the goal
is to adjust the image parameters such that for each frame you
will have 5 animals. See :ref:`here <ToolsMultianimalTracking>`
on how to best achieve this.
To help the user find frames where the number of animals is
incorrect, the button "Auto-detect blobs" can be very useful. It
detects, in each frame, the number of
`"blobs" <https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops>`_.
that fit the image parameters irrespective of distance travelled.
See :func:`MultiAnimalTracking.detect_blobs` for details on what
that function is doing exactly.
Once the user presses the "Track Animals" button, the
:func:`MultiAnimalTracking.ask_user_correct_animal_classification`
function is called. This function uses the current frame and
applies the user defined image parameters to determine the number
of animals used in the experiment. It then shows a popup
indicating the blobs identified as animals and ask the user if
this is correct.
If the user decides to go ahead with tracking, the actual
tracking algorithm starts. The principle of this multi-animal
tracker is the following:
#. User has defined the number of expected animals by choosing a
frame (i.e. Frame # 50) where the correct number of animals
can be identified.
#. A numpy array with the correct space for storing X and Y
coordinates for all these animals for each frame is
pre-allocated
#. In the user defined frame (i.e. Frame # 50), the position of
each animal is identified.
#. The centroid position for each animal is stored in the
pre-allocated array. The order is from identified animal top
left to bottom right. I.e. the animal that is top left in the
image in i.e. Frame #50 will be in position #1 in the numpy
array.
#. As the user defined frame does not have to be the first frame,
the tracking algorithm can run "backwards", i.e. identifying
animals in frame 50, 49, 48... and once it reaches zero it
will run forward, in our example 51, 52 ...
#. In the next frame (which can also be the previous frame as the
analysis can run backwards), the blobs that can be animals are
again identified using animal parameters. In our example where
the starting frame was 50, the "next" frame to be analyzed is 49.
#. The centroids in frame 49 are assigned to the previously
identified frame by calculating the distance of each centroid
to each of the previously identified centroids. Centroids with
the smallest distance are assumed to be from the same animal.
#. In many multi-animal experiments, animal can touch each other
which makes it impossible for the algorithm to distinguish
them. For a frame where 2 (or more) touch each other,
only one centroid can be assigned to the touching animals.
#. Once the animals do not touch anymore, they can be re-identified
as single animals. To assign them to their previous
trajectory, the distance to the previously known position of
the animal that was lost before is used.
However, for the time that the animal is missing,
no assumptions are made and the data is just missing.
"""
def __init__(self,
             data_path,
             colormap,
             recording_framerate,
             organisms_and_heuristics):
    """
    Build the semi-automatic multi-animal tracking window.

    Loads (or computes and caches) the smoothed image stack and the
    background image, reads experiment metadata from
    'experiment_settings.json', constructs all tkinter widgets and the
    embedded matplotlib figures, and schedules the periodic update
    callbacks.

    :param data_path: folder of the experiment; becomes the current
        working directory.
    :param colormap: matplotlib colormap used to display the frames.
    :param recording_framerate: frames per second the experiment was
        recorded at (used to time playback).
    :param organisms_and_heuristics: dict of per-organism heuristic
        parameters, e.g. 'max_speed_animal_mm_per_s'.
    """
    self.path = data_path
    os.chdir(self.path)
    self.recording_framerate = recording_framerate
    self.colormap = colormap
    self.update_overview_bool = True
    self.playback_speed_options = ('0.1X', '0.5X', '1X', '2X',
                                   '5X', 'Custom')
    self.playback_speed_variable = tk.StringVar()
    self.playback_speed_variable.set(self.playback_speed_options[2])
    self.playback_speed_value = 1.0
    # Read experiment metadata; fall back to safe defaults if the
    # file or the expected keys are missing.
    try:
        with open('experiment_settings.json', 'r') as file:
            experiment_variables = json.load(file)
        try:
            self.model_organism = experiment_variables['Model Organism']
            self.pixel_per_mm = experiment_variables['Pixel per mm']
        except KeyError:
            print('Model Organism and Pixel per mm not found in '
                  'in experimental_settings.json')
            self.model_organism = 'unknown'
            self.pixel_per_mm = None  # put the proper number in here
    except FileNotFoundError:
        self.model_organism = 'unknown'
        self.pixel_per_mm = None  # put the proper number in here
    self.images, _framerate = initialize_image_data.get_self_images()
    try:
        # The background takes a long time to compute - load the
        # cached copies if they exist. Saved as .npy: imageio had
        # trouble reading the tiff.
        self.background = np.load('Background.npy')
        self.smooth_images = np.load('smoothed_images.npy')
    except (FileNotFoundError, OSError):
        # ndimage.filters was a deprecated alias (removed in
        # scipy>=1.10) - call gaussian_filter directly.
        # NOTE(review): sigma=0.5 is applied along all three axes,
        # i.e. this also smooths across time - confirm intended.
        self.smooth_images = ndimage.gaussian_filter(
            self.images, sigma=0.5)
        self.background = np.mean(self.smooth_images, axis=2)
        np.save('Background.npy', self.background)
    self.images = None  # free memory!
    try:
        self.timestamps = np.load('timestamps.npy')
    except FileNotFoundError:
        self.timestamps = None
    # tk variables driven by the sliders/entries built below
    self.image_number = tk.IntVar()
    self.image_number.set(0)
    self.callback_func_variable = 0
    self.threshold_number = tk.IntVar()
    self.threshold_number.set(5)
    self.minimum_filled_area_number = tk.IntVar()
    self.minimum_filled_area_number.set(20)
    self.maximum_filled_area_number = tk.IntVar()
    self.maximum_filled_area_number.set(200)
    self.major_over_minor_axis_number = tk.DoubleVar()
    self.major_over_minor_axis_number.set(1)
    self.number_of_blobs = []
    self.manually_jump_to_frame_number = tk.IntVar()
    # the first ROI is the whole image:
    # [row_min, col_min, row_max, col_max]
    self.ROI = [[0, 0, self.background.shape[0],
                 self.background.shape[1]]]
    self.ROI_selected = False
    self.x_release = None
    self.y_release = None
    self.x_press = None
    self.y_press = None
    self.cidpress = None
    self.cidrelease = None
    self.rect = Rectangle((0, 0), 1, 1, alpha=0.1, fill=False,
                          hatch='/')
    # tracking data
    self.centroids = None
    self.bounding_boxes = None
    self.number_of_animals = tk.IntVar()
    self.number_of_animals.set(10)
    self.thresholded_images = None
    self.max_speed_number = tk.DoubleVar()
    try:
        self.max_speed_number.set(
            organisms_and_heuristics[self.model_organism]
            ['max_speed_animal_mm_per_s'])
    except KeyError:
        # organism not listed - pick a default and tell the user
        if self.pixel_per_mm is None or self.pixel_per_mm == 0:
            tk.messagebox.showinfo(
                'No max speed found', 'Did not find ' +
                self.model_organism + ' in\n'
                'list_of_available_organisms.json.\n'
                'In addition, pixel per mm is not defined\n'
                'Could not set max speed automatically. Was set\n'
                'to 10px/s - please change as needed')
            self.max_speed_number.set(10)
        else:
            tk.messagebox.showinfo(
                'No max speed found', 'Did not find ' +
                self.model_organism + ' in\n'
                'list_of_available_organisms.json. '
                'Could not set max speed automatically. Was set\n'
                'to 2mm/s - please change as needed')
            self.max_speed_number.set(2)
    # plotting: colormap, and containers for artists that must be
    # removed/redrawn on every frame
    self.cmap = None
    self.text_artists = []
    self.scat_artists = []
    self.ROI_artist = None
    self.initial_centroids = []
    self.initial_bounding_boxes = []
    self.child = tk.Toplevel()
    self.child.grab_set()
    self.child.wm_title('Semi-Automatic Multi-Animal Tracking')
    self.child.protocol("WM_DELETE_WINDOW", self.quit_func)
    self.child_frame = tk.Frame(self.child)
    self.child_frame.grid(row=0, column=0)
    # The overview
    if large_plots:
        self.fig_overview = Figure(figsize=(11, 7))
    else:
        self.fig_overview = Figure(figsize=(6, 4))
    self.ax_overview = self.fig_overview.add_subplot(111)
    self.image_of_background = self.ax_overview.imshow(
        self.background, vmin=0, vmax=255, cmap=self.colormap)
    self.fig_overview.tight_layout()
    # bind the plot to the GUI - do it in a new frame due to the
    # inherent pack method of NavigationToolbar
    overview_frame = tk.Frame(self.child_frame)
    overview_frame.grid(row=1, column=1, rowspan=3, columnspan=5)
    self.update_overview_button = tk.Button(
        overview_frame, text='Updating Overview',
        command=self.update_overview_func)
    self.update_overview_button.pack()
    self.canvas_overview = tkagg.FigureCanvasTkAgg(
        self.fig_overview, master=overview_frame)
    self.canvas_overview.draw()
    # cache the empty canvas background for fast blitting later
    self.canvas_overview_background = \
        self.canvas_overview.copy_from_bbox(self.ax_overview.bbox)
    # Add the toolbar
    overview_toolbar = tkagg.NavigationToolbar2Tk(
        self.canvas_overview, overview_frame)
    overview_toolbar.update()
    # The next line is necessary to actually show the figure
    self.canvas_overview.get_tk_widget().pack()
    # plot to the right of the image - displays how many blobs are
    # identified in each frame. This should just help the user to
    # get an idea where to look if there are too many/too few blobs.
    if large_plots:
        self.fig_blob_plot = Figure(figsize=(2, 7))
    else:
        self.fig_blob_plot = Figure(figsize=(2, 4))
    self.ax_blob_plot = self.fig_blob_plot.add_subplot(111)
    self.ax_blob_plot.set_xlim(0, self.number_of_animals.get())
    self.ax_blob_plot.set_ylabel('Frame number')
    self.ax_blob_plot.set_xlabel('# of animals')
    # need to plot something to get the line objects that are
    # updated later (blob counts and the current-frame indicator)
    self.blob_plot_background, = self.ax_blob_plot.plot(
        np.zeros(self.smooth_images.shape[2]),
        np.arange(0, self.smooth_images.shape[2], 1))
    self.blob_plot_indicator, = self.ax_blob_plot.plot(
        [0, int(2 * self.number_of_animals.get())],
        [0, 0], color='r', lw=1, alpha=0.8, zorder=0, linestyle=':')
    self.fig_blob_plot.tight_layout()
    # bind the plot to the GUI - do it in a new frame due to the
    # inherent pack method of NavigationToolbar
    blob_plot_frame = tk.Frame(self.child_frame)
    blob_plot_frame.grid(row=1, column=6, rowspan=3)
    self.canvas_blob_plot = tkagg.FigureCanvasTkAgg(
        self.fig_blob_plot, master=blob_plot_frame)
    self.canvas_blob_plot.draw()
    # The next line is necessary to actually show the figure
    self.canvas_blob_plot.get_tk_widget().pack()
    # Button that will call the automated blob detection
    self.blob_detection_button = tk.Button(
        blob_plot_frame,
        text='Auto-detect\nblobs',
        command=self.detect_blobs)
    self.blob_detection_button.pack()
    # track animals
    self.track_animals_frame = tk.Frame(self.child_frame,
                                        relief=tk.RIDGE)
    self.track_animals_frame.grid(row=5, column=6, rowspan=2)
    self.number_of_animals_label = tk.Label(
        self.track_animals_frame,
        text='# of animals in\ncurrent frame')
    self.number_of_animals_label.grid(row=0, column=0)
    self.number_of_animals_label_number = tk.Label(
        self.track_animals_frame,
        textvariable=self.number_of_animals)
    self.number_of_animals_label_number.grid(row=0, column=1)
    # increase size of font for better readability
    self.number_of_animals_label_number.config(
        font=("Arial", 20, "bold"))
    self.track_animals_button = tk.Button(
        self.track_animals_frame,
        text='Track animals',
        command=self.ask_user_correct_animal_classification)
    self.track_animals_button.grid(row=1, column=0, columnspan=2)
    # Button to interpolate - disabled until tracking has run
    self.interpolate_button = tk.Button(
        self.track_animals_frame,
        text='Interpolate centroids',
        state=tk.DISABLED,
        command=self.interpolate)
    self.interpolate_button.grid(row=2, column=0, columnspan=3)
    # Optimize parameters for automated detection
    self.detection_frame = tk.LabelFrame(
        self.child_frame,
        text='Image parameters')
    self.detection_frame.grid(row=0, column=0, rowspan=5)
    # scale to choose number of STDs from mean to threshold
    self.threshold_scale = tk.Scale(
        self.detection_frame, from_=0, to=10, resolution=1,
        label='Threshold (STDs from Mean)',
        variable=self.threshold_number,
        orient='horizontal', len=200,
        command=self.update_visualization
    )
    self.threshold_scale.grid(row=0, column=0)
    # scale to choose minimum filled area
    self.minimum_filled_area_scale = tk.Scale(
        self.detection_frame, from_=0, to=100, resolution=1,
        label='Minimum Filled Area',
        variable=self.minimum_filled_area_number,
        orient='horizontal', len=200,
        command=self.update_visualization
    )
    self.minimum_filled_area_scale.grid(row=1, column=0)
    # scale to choose maximum filled area
    self.maximum_filled_area_scale = tk.Scale(
        self.detection_frame, from_=0, to=400, resolution=1,
        label='Maximum Filled Area',
        variable=self.maximum_filled_area_number,
        orient='horizontal', len=200,
        command=self.update_visualization
    )
    self.maximum_filled_area_scale.grid(row=2, column=0)
    # scale to choose length ratio
    self.major_over_minor_axis_scale = tk.Scale(
        self.detection_frame, from_=1, to=10, resolution=0.1,
        label='Major over Minor Axis',
        variable=self.major_over_minor_axis_number,
        orient='horizontal', len=200,
        command=self.update_visualization
    )
    self.major_over_minor_axis_scale.grid(row=3, column=0)
    # Entry to choose max speed of the animal - the unit on the
    # label depends on whether pixel per mm is known
    if self.pixel_per_mm is None:
        self.max_speed_label = tk.Label(self.detection_frame,
                                        text='Max Speed Animal ['
                                             'px/s]')
        self.max_speed_label.grid(row=4, column=0)
    else:
        self.max_speed_label = tk.Label(self.detection_frame,
                                        text='Max Speed Animal '
                                             '[mm/s]')
        self.max_speed_label.grid(row=4, column=0)
    self.max_speed_Entry = tk.Entry(
        self.detection_frame,
        textvariable=self.max_speed_number,
        width=5)
    self.max_speed_Entry.grid(row=5, column=0)
    self.select_roi_button = tk.Button(
        self.detection_frame,
        text='Select Rectangular ROI',
        command=self.draw_rectangle)
    self.select_roi_button.grid(row=6, column=0)
    self.select_roi_button['bg'] = 'light grey'
    self.play_frame = tk.Frame(
        self.child_frame,
        relief='groove',
        borderwidth=2)
    self.play_frame.grid(row=6, column=1, columnspan=5, sticky='w')
    # button for play
    self.play_button = tk.Button(
        self.play_frame,
        text='Start Playing',
        command=self.play_func)
    self.play_button.grid(row=0, column=0)
    # scale to choose where to play
    self.image_number_scale = tk.Scale(
        self.play_frame, from_=0,
        to=self.smooth_images.shape[2] - 1, resolution=1,
        label='Frame shown',
        variable=self.image_number,
        orient='horizontal', len=400,
        command=self.update_visualization)
    self.image_number_scale.grid(row=0, column=1, columnspan=3)
    self.image_number_scale.set(self.image_number.get())
    self.playback_speed_frame = tk.Frame(
        self.child_frame, relief='groove', borderwidth=2)
    self.playback_speed_frame.grid(
        row=7, column=1, columnspan=5, sticky='w')
    # Information about recording framerate
    self.recording_framerate_label = tk.Label(
        self.playback_speed_frame,
        text='Experiment was recorded with ' + repr(
            self.recording_framerate)
             + 'fps and is being played back at')
    self.recording_framerate_label.grid(row=0, column=0)
    # menu for speed
    self.speed_menu = tk.OptionMenu(self.playback_speed_frame,
                                    self.playback_speed_variable,
                                    *self.playback_speed_options)
    self.speed_menu.grid(row=0, column=1)
    # more info about recording framerate
    self.recording_framerate_label_two = tk.Label(
        self.playback_speed_frame, text='speed')
    self.recording_framerate_label_two.grid(row=0, column=2)
    self.manual_jump_frame = tk.Frame(
        self.child_frame, relief='groove', borderwidth=2)
    self.manual_jump_frame.grid(row=8, column=1, sticky='w')
    # Let user manually enter a frame they want to jump to
    self.manually_jump_to_frame_label = tk.Label(
        self.manual_jump_frame,
        text='Enter a frame you want to jump to')
    self.manually_jump_to_frame_label.grid(row=0, column=0)
    self.manually_jump_to_frame_text = tk.Entry(
        self.manual_jump_frame,
        textvariable=self.manually_jump_to_frame_number,
        width=5, )
    self.manually_jump_to_frame_text.grid(row=0, column=1)
    self.manually_jump_to_frame_button = tk.Button(
        self.manual_jump_frame, text='Jump to frame',
        command=self.manually_jump_to_frame_func)
    self.manually_jump_to_frame_button.grid(row=0, column=2)
    # start by showing the recorded experiment.
    # BUGFIX: pass the bound methods themselves - the original code
    # called them (self.update_visualization()) and handed their
    # None return value to after(), which ran the update
    # immediately and then merely blocked for 100 ms.
    self.child.after(100, self.update_visualization)
    self.child.after(100, self.callback_func)
def quit_func(self):
    """
    In order to quit this window and go back to the main GUI,
    the user needs to press the 'quit' button (or close the window)
    and this function will be called. Asks for confirmation, then
    releases the grab and destroys the child window.
    """
    if tk.messagebox.askokcancel("Quit", "Do you want to quit?"):
        # set main window active again
        self.child.grab_release()
        # BUGFIX: pass the method itself so tkinter destroys the
        # window from the event loop; the original called destroy()
        # immediately and scheduled its None return value.
        self.child.after(0, self.child.destroy)
[docs] def draw_rectangle(self):
"""
When the user presses the "Select rectangle" Button,
this function is called.
It connects the mouse button press and release events.
Call :func:`MultiAnimalTracking.on_press` and
:func:`MultiAnimalTracking.on_release`
"""
self.ROI_selected = True
self.select_roi_button['bg'] = 'red'
self.cidpress = self.ax_overview.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.ax_overview.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
[docs] def on_press(self, event):
"""
Saves x and y position when user presses mouse button on main
window
"""
self.x_press = int(round(event.xdata))
self.y_press = int(round(event.ydata))
[docs] def on_release(self, event):
"""
Saves x and y position when user releases mouse button on
main window.
Also takes care of updating the main window with the new ROI
"""
self.x_release = int(round(event.xdata))
self.y_release = int(round(event.ydata))
if self.y_press < self.y_release:
row_min = self.y_press
row_max = self.y_release
else:
row_min = self.y_release
row_max = self.y_press
if self.x_press < self.x_release:
column_min = self.x_press
column_max = self.x_release
else:
column_max = self.x_press
column_min = self.x_release
self.ROI.append([row_min, column_min, row_max, column_max])
print(self.ROI)
self.rect.set_width(column_max - column_min)
self.rect.set_height(row_max - row_min)
self.rect.set_xy((column_min, row_min))
self.update_visualization()
self.ax_overview.figure.canvas.mpl_disconnect(self.cidpress)
self.ax_overview.figure.canvas.mpl_disconnect(self.cidrelease)
self.select_roi_button['bg'] = 'light grey'
def update_visualization(self, scale_input=None):
    """
    Updates the embedded matplotlib plots by setting the data to
    the current image_number.

    Called directly and as a tk.Scale callback (which passes the
    slider value; accepted as the unused *scale_input*).

    For the currently shown frame: background-subtracts and
    thresholds the image, finds blobs matching the user-defined
    image parameters inside the current ROI, draws a bounding box
    around each accepted blob, counts them into
    ``number_of_animals``, redraws the overview canvas via
    blitting, and moves the current-frame indicator in the blob
    plot.
    """
    print('called update visualization')
    # reset number of animals
    self.number_of_animals.set(0)
    if self.update_overview_bool:
        # subtract image by converting both the background and
        # the smoothed images to int16 (from uint8) so the
        # difference can go negative without wrapping around
        subtracted_image = self.background.astype(np.int16) - \
            self.smooth_images[:, :, self.image_number.get()].astype(np.int16)
        # calculate the mean and STD
        mean_image = np.nanmean(subtracted_image)
        std_image = np.std(subtracted_image)
        # threshold as defined by the slider
        thresholded_image = subtracted_image[:, :] \
            > mean_image + self.threshold_number.get() \
            * std_image
        # identify all the blobs
        blobs = measure.regionprops(measure.label(thresholded_image))
        self.image_to_plot = \
            self.smooth_images[:, :, self.image_number.get()].copy()  # hard
        # copy necessary to update overview
        for i in range(len(blobs)):
            try:
                # a blob counts as an animal if its filled area lies
                # strictly between the min/max sliders, it is
                # elongated enough, and its bounding box lies fully
                # inside the most recent ROI
                if self.minimum_filled_area_number.get() \
                        < blobs[i].filled_area \
                        < self.maximum_filled_area_number.get() \
                        and \
                        blobs[i].major_axis_length \
                        / blobs[i].minor_axis_length \
                        > self.major_over_minor_axis_number.get() \
                        and blobs[i].bbox[0] > self.ROI[-1][0] \
                        and blobs[i].bbox[1] > self.ROI[-1][1] \
                        and blobs[i].bbox[2] < self.ROI[-1][2] \
                        and blobs[i].bbox[3] < self.ROI[-1][3]:
                    # Find it hard to see well when two animals
                    # are counted as one - bounding boxes should
                    # help here: draw the four box edges in black
                    # (pixel value 0) on the displayed image
                    try:
                        rr, cc = line(int(blobs[i].bbox[0]),
                                      int(blobs[i].bbox[1]),
                                      int(blobs[i].bbox[0]),
                                      int(blobs[i].bbox[3]))  # top horizontal
                        self.image_to_plot[rr, cc] = 0
                        rr, cc = line(int(blobs[i].bbox[0]),
                                      int(blobs[i].bbox[3]),
                                      int(blobs[i].bbox[2]),
                                      int(blobs[i].bbox[3]))  # right vertical
                        self.image_to_plot[rr, cc] = 0
                        rr, cc = line(int(blobs[i].bbox[2]),
                                      int(blobs[i].bbox[1]),
                                      int(blobs[i].bbox[2]),
                                      int(blobs[i].bbox[3]))  # bottom horizontal
                        self.image_to_plot[rr, cc] = 0
                        rr, cc = line(int(blobs[i].bbox[0]),
                                      int(blobs[i].bbox[1]),
                                      int(blobs[i].bbox[2]),
                                      int(blobs[i].bbox[1]))  # left vertical
                        self.image_to_plot[rr, cc] = 0
                        # update the number of animals
                        self.number_of_animals.set(
                            self.number_of_animals.get() + 1)
                    except IndexError:
                        print('while drawing the bounding box had '
                              'index error, likely action at the '
                              'edge of the arena')
            except ZeroDivisionError:
                # minor_axis_length can be 0 for tiny blobs - seems
                # to happen during initialization
                pass
        self.image_of_background.set_data(self.image_to_plot)
        self.canvas_overview.restore_region(self.canvas_overview_background)
        self.ax_overview.draw_artist(self.image_of_background)
        # after animals have been identified let user scroll
        # through the experiment: remove last frame's animal-index
        # labels, then draw the labels for the current frame
        for i in range(len(self.text_artists)):
            self.text_artists[i].remove()
        self.text_artists = []
        i_text_counter = 0
        if self.centroids is not None:
            for i_text in range(self.centroids.shape[1]):
                # NaN centroid means the animal is lost in this frame
                if not np.isnan(self.centroids[0, i_text, self.image_number.get()]):
                    self.text_artists.append(
                        self.ax_overview.text(int(
                            self.bounding_boxes[3, i_text, self.image_number.get()]),
                            int(self.bounding_boxes[0, i_text, self.image_number.get()]),
                            repr(i_text)))
                    try:
                        self.ax_overview.draw_artist(
                            self.text_artists[i_text_counter])
                    # error happens if user zooms into an area
                    # where no text needs to be updated
                    except IndexError:
                        pass
                    # only add to text_counter if not nan
                    i_text_counter += 1
        # also update the rectangle showing the current ROI
        self.ax_overview.add_patch(self.rect)
        self.rect.set_width(self.ROI[-1][3] - self.ROI[-1][1])
        self.rect.set_height(self.ROI[-1][2] - self.ROI[-1][0])
        self.rect.set_xy((self.ROI[-1][1], self.ROI[-1][0]))
        self.ax_overview.draw_artist(self.rect)
        # NOTE(review): list.remove() without an argument always
        # raises TypeError, which is swallowed below, so
        # scat_artists is never actually cleared here - presumably
        # the scatter artists were meant to be removed; confirm.
        try:
            self.scat_artists.remove()
            self.scat_artists = []
        except TypeError:
            pass
        self.canvas_overview.blit(self.ax_overview.bbox)
        print('frame # ' + repr(self.image_number.get()))
        # also update the blobplot: move the horizontal
        # current-frame indicator line
        self.blob_plot_indicator.set_ydata([self.image_number.get(),
                                            self.image_number.get()])
        self.canvas_blob_plot.draw()
        self.child.update()
def play_func(self):
    """
    Function is called when user presses the "Start playing" button.

    Toggles the button label (the label doubles as the play state)
    and, while playing, steps through the frames, redrawing each one
    and waiting so that playback approximates
    recording_framerate * playback_speed_value. When the last frame
    is reached the counter wraps back to frame 0.
    """
    # the button text is the play/pause state: 'Stop Playing' means
    # the loop below should keep running
    if self.play_button['text'] == 'Stop Playing':
        self.play_button['text'] = 'Start Playing'
    else:
        self.play_button['text'] = 'Stop Playing'
    while self.play_button['text'] == 'Stop Playing':
        if self.image_number.get() < self.smooth_images.shape[2] - 1:
            time_start = time.time()
            self.update_visualization()
            self.image_number.set(self.image_number.get() + 1)
            # play as fast as requested: if drawing took less than
            # one playback frame interval, wait out the difference.
            # NOTE(review): the interval is in milliseconds but
            # (time.time() - time_start) is in seconds, so the
            # subtracted elapsed time is effectively negligible and
            # playback runs slightly slow - confirm intended units.
            if (time.time() - time_start) * 1000 \
                    < 1000 / self.recording_framerate \
                    * (1 / self.playback_speed_value):
                print(repr(int(
                    round(((1000 / self.recording_framerate
                            * (1 / self.playback_speed_value))
                           - (time.time() - time_start))))))
                # after() without a callback just waits that long
                self.child.after(
                    int(round(((1000 / self.recording_framerate
                                * (1 / self.playback_speed_value))
                               - (time.time() - time_start)))))
            else:
                print(time.time() - time_start)  # notify the user somehow
        else:
            # reached the end - wrap around to the first frame
            self.image_number.set(0)
def callback_func(self):
self.child.after(500, self.callback_func)
if self.playback_speed_variable.get() != 'Custom':
if self.playback_speed_value \
!= float(self.playback_speed_variable.get()[:-1]):
self.playback_speed_value \
= float(self.playback_speed_variable.get()[:-1])
else:
print('Todo: open popup asking user for a custom speed')
# Todo open popup asking user for a custom speed
[docs] def manually_jump_to_frame_func(self):
"""
Function is called when user presses the "Jump to frame" button.
"""
try:
if 0 < int(self.manually_jump_to_frame_number.get()) \
< self.smooth_images.shape[2]:
self.image_number.set(int(self.manually_jump_to_frame_number.get()))
self.update_visualization()
else:
messagebox.showerror(
"Invalid Input",
"You have entered a value smaller than 0 or "
"\nlarger than the exisiting number of frames ("
+ repr( self.smooth_images.shape[2] - 1)
+") \n \n Please enter a number between 0 and " +
repr(self.smooth_images.shape[2]))
except ValueError:
messagebox.showerror(
"Invalid Input",
"You have not entered an Integer number. "
"\n \n Please enter a number between 0 and "
+ repr(self.smooth_images.shape[2] - 1))
def detect_blobs(self):
    """
    This function is intended to be used "pre-tracking": If the
    user thinks the Image parameters are ok and they press
    "Detect blobs" this function is called. It checks for the
    number of blobs fitting the Image parameters for each frame.
    This will make it obvious where the image parameters are
    producing incorrect results.
    The function does the following:
    #. Subtract all images from the background image.
    #. Threshold (binarize) the subtracted images using the user
       defined Threshold Image parameter.
    #. Loop through the subtracted frames and call the
       `"regionprops" <https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops>`_.
       function on each frame (restricted to the current ROI).
    #. Loop through each of the blobs and determine if they count
       as animals, i.e. by comparing their filled area to the user
       defined minimum and maximum filled area and their
       major/minor axis ratio to the elongation slider.
    #. Count how many blobs per frame qualify.
    #. Plot the per-frame counts in the plot on the right side of
       the main window.
    """
    # reset list of per-frame blob counts
    self.number_of_blobs = []
    # broadcast the 2D background over the frame axis; int16 so the
    # difference can go negative without uint8 wrap-around
    subtracted_images = self.background[:, :, np.newaxis].astype(np.int16) \
        - self.smooth_images.astype(np.int16)
    # per-frame mean and STD (reduce over the two spatial axes,
    # leaving one value per frame that broadcasts back below)
    mean_images = np.nanmean(subtracted_images, axis=(0, 1))
    std_images = np.std(subtracted_images, axis=(0, 1))
    # binarize every frame with its own mean + k*STD threshold
    thresholded_images = subtracted_images[:, :] > mean_images \
        + self.threshold_number.get() * std_images
    for i_images in range(self.smooth_images.shape[2]):
        blob_counter = 0
        # label connected components inside the most recent ROI only
        blobs = measure.regionprops(measure.label(
            thresholded_images[self.ROI[-1][0]:self.ROI[-1][2],
                               self.ROI[-1][1]:self.ROI[-1][3], i_images]))
        for i_blobs in blobs:
            if self.minimum_filled_area_number.get() \
                    < i_blobs.filled_area \
                    < self.maximum_filled_area_number.get():
                try:
                    # elongation filter: major over minor axis
                    if i_blobs.major_axis_length / i_blobs.minor_axis_length \
                            > self.major_over_minor_axis_number.get():
                        blob_counter += 1
                except ZeroDivisionError:
                    # small blobs will do this (minor axis == 0)
                    pass
        self.number_of_blobs.append(blob_counter)
    # update the plot
    self.blob_plot_background.set_xdata(self.number_of_blobs)
    # re-scale the x_lim to - and + 10% of the minimum and maximum
    # number of blobs, respectively
    self.ax_blob_plot.set_xlim(
        int(np.floor(
            min(self.number_of_blobs) - min(self.number_of_blobs) * 0.1)),
        int(np.ceil(
            max(self.number_of_blobs) + max(self.number_of_blobs) * 0.1)))
    self.canvas_blob_plot.draw()
[docs] def update_overview_func(self):
"""
Function is called when user presses the "Update Overview
Button. Just changes the bool used in
:func:`update_visualization`
"""
if self.update_overview_button['text'] == 'Not updating Overview':
self.update_overview_button['text'] = 'Updating Overview'
self.update_overview_bool = True
if self.play_button['text'] == 'Start Playing':
self.update_visualization()
else:
self.update_overview_button['text'] = 'Not updating Overview'
self.update_overview_bool = False
    def ask_user_correct_animal_classification(self):
        """
        This function is called after the user presses "Track Animals".

        #. Creates a popup window to show the current frame
        #. Subtracts the current image from the background image
        #. Thresholds (binarizes) the subtracted image with the user
           defined Treshold.
        #. Identifies all blobs in the current image by calling the
           `"regionprops" <https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops>`_.
           function.
        #. For each identified blob, determine whether it counts as
           an animal according to the user defined image parameters.
        #. If yes, draw a box around that blob.
        #. Display the resulting image and ask the user if the
           identified and numbered blobs are indeed animals and if the
           tracking algorithm should start.

        .. important::
            The number of animals identified here is used as the
            'ground truth' of how many animals are present during the
            experiment.
        """
        # Modal popup: grab_set() routes all events to this window so
        # the user must answer before returning to the main window.
        second_child = tk.Toplevel()
        second_child.grab_set()
        second_child.wm_title('Identify animals')
        second_child_frame = tk.Frame(second_child)
        second_child_frame.grid(row=0, column=0)
        # module-level flag that selects a bigger figure on large screens
        if large_plots:
            fig_animal_classification = Figure(figsize=(11, 7))
        else:
            fig_animal_classification = Figure(figsize=(6,4))
        ax_animal_classification = fig_animal_classification.add_subplot(111)
        # show the currently selected (smoothed) frame
        image_of_animal_classification = ax_animal_classification.imshow(
            self.smooth_images[:,:,self.image_number.get()],
            vmin=0, vmax=255, cmap=self.colormap)
        fig_animal_classification.tight_layout()
        frame_animal_classification = tk.Frame(second_child_frame)
        frame_animal_classification.grid(row=0, column=0,columnspan=2)
        canvas_animal_classification = tkagg.FigureCanvasTkAgg(
            fig_animal_classification, master=frame_animal_classification)
        canvas_animal_classification.draw()
        # keep a copy of the clean canvas so the image can be blitted
        # quickly after modification
        canvas_animal_classification_background = \
            canvas_animal_classification.copy_from_bbox(
                ax_animal_classification.bbox)
        # Add the toolbar
        overview_toolbar = tkagg.NavigationToolbar2Tk(
            canvas_animal_classification, frame_animal_classification)
        overview_toolbar.update()
        # The next line is necessary to actually show the figure
        canvas_animal_classification.get_tk_widget().pack()

        def cancel_tracking():
            # Close the popup without starting the tracking.
            # set main window active again
            second_child.grab_release()
            # close the child window
            # NOTE(review): destroy() is executed immediately here and
            # its return value (None) is handed to after() - works in
            # practice, but after(0, second_child.destroy) was likely
            # intended.
            second_child.after(0, second_child.destroy())

        # Button to return to previous window and select a different
        # frame/different settings
        cancel_tracking_button = tk.Button(second_child_frame,
                                           text='Not good, take me back',
                                           command = cancel_tracking)
        cancel_tracking_button.grid(row=2,column=0)

        def start_tracking():
            # User confirmed the classification: pre-compute the
            # thresholded (binary) image stack and hand over to
            # tracking_start().
            # calculate the thresholded images with the STD at this point
            subtracted_images = self.background[:,:,np.newaxis].astype(np.int16) \
                                - self.smooth_images.astype(np.int16)
            # per-frame mean of the background-subtracted stack
            mean_images = np.nanmean(subtracted_images,axis=(0,1))
            # Have a memory problem: instead of taking the std of all
            # images, just take the std of a single image in the
            # middle of the video
            # TODO: Revisit
            std_images = np.nanstd(
                subtracted_images[:,:,-int(subtracted_images.shape[2]/2)])
            # TODO: Fix Typo - 'thresholed_images' (sic) is read by
            # tracking_start(), so the name must stay consistent there.
            self.thresholed_images = subtracted_images > mean_images \
                                     + self.threshold_number.get()*std_images
            mean_images = None  # free memory
            std_images = None  # free memory
            # set main window active again
            second_child.grab_release()
            # close the child window
            second_child.after(0, second_child.destroy())
            # start tracking
            self.tracking_start()

        start_tracking_button = tk.Button(
            second_child_frame,
            text='Looks good, start tracking',
            command = start_tracking)
        start_tracking_button.grid(row=2,column=1)
        # TODO: Check what's going on here - This was just done 10
        # lines up! (Here only the single selected frame is processed,
        # which is cheap; start_tracking() processes the full stack.)
        subtracted_images = self.background[:,:].astype(np.int16) \
                            - self.smooth_images[:,:,self.image_number.get()].astype(np.int16)
        mean_images = np.nanmean(subtracted_images, axis=(0, 1))
        std_images = np.std(subtracted_images, axis=(0, 1))
        # only one image for speed
        thresholded_images = subtracted_images[:, :] > mean_images \
                             + self.threshold_number.get() * std_images
        # label connected components and get their properties
        blobs = measure.regionprops(measure.label(thresholded_images))
        # hard copy so the boxes can be drawn into it without touching
        # the original data
        image_to_plot = self.smooth_images[:,:,self.image_number.get()].copy()
        # reset in case user clicks on tracking twice
        self.initial_centroids = []
        self.initial_bounding_boxes = []
        identified_animals = 0
        for i_blobs in blobs:
            try:
                long_axis = i_blobs.major_axis_length / \
                            i_blobs.minor_axis_length
            except ZeroDivisionError:
                # this happens when the blob is tiny, so just assume
                # it to be "round" (both axes are same length and
                # therefore the ratio is 1)
                long_axis = 1
            # user-defined heuristics: area between min and max,
            # elongated enough, and fully inside the ROI
            if self.minimum_filled_area_number.get() \
                    < i_blobs.filled_area \
                    < self.maximum_filled_area_number.get() \
                    and long_axis > self.major_over_minor_axis_number.get() \
                    and i_blobs.bbox[0] > self.ROI[-1][0] \
                    and i_blobs.bbox[1] > self.ROI[-1][1] \
                    and i_blobs.bbox[2] < self.ROI[-1][2] \
                    and i_blobs.bbox[3] < self.ROI[-1][3]:
                # draw the four edges of the bounding box (pixel value
                # 0 = black) directly into the displayed image
                try:
                    rr, cc = line(int(i_blobs.bbox[0]),
                                  int(i_blobs.bbox[1]),
                                  int(i_blobs.bbox[0]),
                                  int(i_blobs.bbox[3]))  # top horizontal
                    image_to_plot[rr, cc] = 0
                    rr, cc = line(int(i_blobs.bbox[0]),
                                  int(i_blobs.bbox[3]),
                                  int(i_blobs.bbox[2]),
                                  int(i_blobs.bbox[3]))  # right
                    # vertical
                    image_to_plot[rr, cc] = 0
                    rr, cc = line(int(i_blobs.bbox[2]),
                                  int(i_blobs.bbox[1]),
                                  int(i_blobs.bbox[2]),
                                  int(i_blobs.bbox[3]))  # bottom horizontal
                    image_to_plot[rr, cc] = 0
                    rr, cc = line(int(i_blobs.bbox[0]),
                                  int(i_blobs.bbox[1]),
                                  int(i_blobs.bbox[2]),
                                  int(i_blobs.bbox[1]))  # left vertical
                    image_to_plot[rr, cc] = 0
                    self.initial_centroids.append(i_blobs.centroid)  # come as tuples anyway, no need to list
                    self.initial_bounding_boxes.append(i_blobs.bbox)  # come as tuples anyway, no need to list
                    identified_animals += 1
                except IndexError:
                    print('while drawing the bounding box had index '
                          'error, likely action at the edge of the arena')
        # tell the user how many animals were found - this count will
        # be treated as ground truth by the tracker
        animal_number = tk.Label(
            second_child_frame,
            text= repr(identified_animals)
                  + ' animals have been identified '
                    '\nDuring tracking, this is the expected # of '
                    'animals')
        animal_number.config(font=("Helvetica", 16))
        animal_number.grid(row=1, column=0, columnspan=2)
        # show the image with the boxes and number each detected animal
        image_of_animal_classification.set_data(image_to_plot)
        canvas_animal_classification.restore_region(
            canvas_animal_classification_background)
        ax_animal_classification.draw_artist(image_of_animal_classification)
        for i_text in range(len(self.initial_centroids)):
            ax_animal_classification.text(
                int(self.initial_bounding_boxes[i_text][3]),
                int(self.initial_bounding_boxes[i_text][0]),
                repr(i_text))
        canvas_animal_classification.draw()
    def tracking_start(self):
        """
        This function organizes the tracking of the animals.

        It pre-allocates the numpy arrays for the centroid positions
        and bounding boxes after identifying the correct number of
        animals in the current frame.

        The actual tracking function, tracking_loop(), is defined
        locally in this function and is called once per frame: first
        backwards in time from the user-selected frame to the start of
        the recording, then forwards to the end.

        If the details in the documentation of this class are not
        sufficient please have a look at the heavily annotated source
        code of the tracking_loop() function below.
        """
        print('tracking starts')
        # Identify all blobs in the image that the user has defined as
        # a good image with the user parameters provided. This is
        # identical to what happened in func
        # ask_user_correct_animal_classification() but cheap,
        # so repeated here to be more explicit (and not moving
        # variables around).
        # NOTE: 'thresholed_images' (sic) is the attribute name set in
        # ask_user_correct_animal_classification().
        blobs = measure.regionprops(measure.label(
            self.thresholed_images[:, :, self.image_number.get()]))
        blob_counter = 0
        # Just count how many blobs are defined as animals
        for i_blobs in blobs:
            # heuristics to detect blobs that look like animals -
            try:
                long_axis = i_blobs.major_axis_length / \
                            i_blobs.minor_axis_length
            except ZeroDivisionError:
                # this happens when the blob is tiny
                long_axis = 1
            # area within user bounds, elongated enough, and bounding
            # box fully inside the user-selected ROI
            if self.minimum_filled_area_number.get() \
                    < i_blobs.filled_area \
                    < self.maximum_filled_area_number.get() \
                    and i_blobs.minor_axis_length > 0 \
                    and long_axis > self.major_over_minor_axis_number.get() \
                    and i_blobs.bbox[0] > self.ROI[-1][0] \
                    and i_blobs.bbox[1] > self.ROI[-1][1] \
                    and i_blobs.bbox[2] < self.ROI[-1][2] \
                    and i_blobs.bbox[3] < self.ROI[-1][3]:
                blob_counter += 1
        # As the user just confirmed (clicked on button that said
        # start tracking after presenting identified animals) that
        # the number of blobs corresponds to the number of animals:
        # pre-allocate centroid and bounding boxes.
        # centroids axes: (row, col, found-flag) x animal x frame
        self.centroids = np.zeros((3, blob_counter,
                                   self.smooth_images.shape[2]))
        self.centroids.fill(np.nan)
        # bounding_boxes axes: (min_row, min_col, max_row, max_col)
        # x animal x frame
        self.bounding_boxes = np.zeros((4, blob_counter,
                                        self.smooth_images.shape[2]))
        self.bounding_boxes.fill(np.nan)
        # an empty array that is taken through the tracking loop.
        # Saves the position of the animals that are currently
        # not being detected.
        missing_animal = np.zeros((2, blob_counter))
        missing_animal.fill(np.nan)
        # identify the initial position of the animals - Necessary as
        # this will be treated as a 'ground truth' about both the
        # number of animals that should be detected as well as the
        # position of the animals. As this tracking algorithm mainly
        # goes for distance between blobs this is essential to be
        # able to get semi-correct assignment of animals
        blob_counter = 0
        for i_blobs in blobs:
            # heuristics to detect blobs that look like animals
            if self.minimum_filled_area_number.get() \
                    < i_blobs.filled_area \
                    < self.maximum_filled_area_number.get() \
                    and i_blobs.minor_axis_length > 0 \
                    and i_blobs.major_axis_length \
                    / i_blobs.minor_axis_length \
                    > self.major_over_minor_axis_number.get() \
                    and i_blobs.bbox[0] > self.ROI[-1][0] \
                    and i_blobs.bbox[1] > self.ROI[-1][1] \
                    and i_blobs.bbox[2] < self.ROI[-1][2] \
                    and i_blobs.bbox[3] < self.ROI[-1][3]:
                # assign centroid position as int (index only can take int)
                # row, then column, number of animals at this position
                self.centroids[:, blob_counter, self.image_number.get()] = \
                    int(round(i_blobs.centroid[0])), \
                    int(round(i_blobs.centroid[1])), 1
                # also assign original bounding box
                self.bounding_boxes[:, blob_counter, self.image_number.get()] = \
                    int(i_blobs.bbox[0]),\
                    int(i_blobs.bbox[1]),\
                    int(i_blobs.bbox[2]),\
                    int(i_blobs.bbox[3])
                blob_counter += 1

        def tracking_loop(backwards = True):
            """
            As this is a post-hoc analysis the analysis can go
            backwards and forwards in time. The principle of this
            algorithm is that the user provides a frame where all
            animals are visible so that location and number can
            be classified.

            This function takes the current frame index (``i_frame``,
            bound via closure from the calling loops below), identifies
            all the blobs that look like animals. It then calculates
            the distance of the centroids in the current frame to the
            centroids in the previously analyzed frame. If no animal
            has been detected in the last frame (i.e. because the
            animals crashed or because they were hiding in the
            shadows) just take the last known position and calculate
            the distance.

            Also uses (via closure): ``max_speed_pixel``,
            ``number_of_frames_animal_lost`` and ``missing_animal``
            defined in the enclosing function.
            """
            # identify all blobs in image
            blobs = measure.regionprops(measure.label(
                self.thresholed_images[:, :, i_frame]))
            animals_counted = 0
            # The following three arrays hold the data that is
            # necessary to correctly assign each blob to a previously
            # identified animal.
            # The easiest way to think about these is to think about
            # stacking each of them in a excel sheet: minimal_dist
            # will be in the first column (A) and hold rows 1-x (
            # x=number of animals). It will hold (once it has been
            # filled) the minimal distance (in pixels) between the
            # blobs in the current frame and the previously analyzed
            # frame.
            # animal_index will be in the second column (B) from row
            # 1-x. It holds the index (== identifier) of the animal
            # identified in the PREVIOUSLY analyzed frame.
            # blob_index: As many more blobs are identified (cameras
            # are noisy, experimental setups not perfect) there are
            # usually a ton of blobs identified. It can easily range
            # into double, sometimes triple digits. Using the user
            # input for minimal and maximal filled area and the major
            # divided by minor axis most of these blobs can be
            # discarded as not being animals. The blob_index holds
            # the index that points in the blob 'array' to the blob
            # we're interested in. In the example above (excel sheet)
            # the blob index would be in column C and tell us which
            # blob needs to be assigned as being which animal
            # Typical example (AFTER the assignment, of course):
            # minimal_dist  animal_index  blob_index
            # 1.423         0             5
            # 2.534         2             3
            # 0.436         1             10
            minimal_dist = np.zeros((self.centroids.shape[1]))
            # filled with NaNs as zeros would always be the smallest
            # distance
            minimal_dist.fill(np.nan)
            animal_index = np.zeros((self.centroids.shape[1]))
            animal_index.fill(np.nan)
            blob_index = np.zeros((self.centroids.shape[1]))
            blob_index.fill(np.nan)
            # again, count number of blobs that count as animals in
            # the current frame
            for j_blob in blobs:
                if self.minimum_filled_area_number.get() \
                        < j_blob.filled_area \
                        < self.maximum_filled_area_number.get() \
                        and j_blob.minor_axis_length > 0 \
                        and j_blob.major_axis_length / j_blob.minor_axis_length \
                        > self.major_over_minor_axis_number.get() \
                        and j_blob.bbox[0] > self.ROI[-1][0] \
                        and j_blob.bbox[1] > self.ROI[-1][1] \
                        and j_blob.bbox[2] < self.ROI[-1][2] \
                        and j_blob.bbox[3] < self.ROI[-1][3]:
                    animals_counted += 1
            # creating this working copy of the previously analyzed
            # self.centroids makes the code much more readable.
            if backwards:
                previous_centroid_positions = \
                    self.centroids[:2, :, i_frame + 1].copy()
            # if we don't go backwards we are going forward. The last
            # frame would then be in the past.
            else:
                previous_centroid_positions = \
                    self.centroids[:2, :, i_frame - 1].copy()
            if backwards:
                # count number of animals counted in previous frame
                # (the third row holds a 1 for every found animal)
                animals_counted_in_previous_frame = \
                    np.nansum(self.centroids[2, :, i_frame + 1])
            # if we don't go backwards we are going forward. The last
            # frame would then be in the past.
            else:
                animals_counted_in_previous_frame =\
                    np.nansum(self.centroids[2, :, i_frame - 1])
            # check if there are less animals in the currently
            # analyzed frame compared to the last
            if animals_counted_in_previous_frame \
                    < self.centroids.shape[1]:
                # which index was missing last frame?
                for i_missing in range(len(
                        np.where(np.isnan(
                            previous_centroid_positions[0]))[0])):
                    # find the index of the missing animal in the
                    # last frame
                    # NOTE(review): the next statement is a bare
                    # expression with no effect - the lookup result is
                    # discarded. Kept as-is; presumably leftover from
                    # refactoring. TODO confirm and remove.
                    np.where(np.isnan(
                        previous_centroid_positions[0]))[0][i_missing]
                    # just add a one for each frame that the animal
                    # is missing
                    number_of_frames_animal_lost[np.where(np.isnan(
                        previous_centroid_positions[0]))[0][i_missing]] \
                        += 1
            # If an animal that should exist could not have been
            # assigned a blob in the previously analyzed frame it
            # will be indicated as np.nan. As it can appear again (
            # either because animals part again after a crash or
            # because the the animal comes back from hiding) take the
            # last known position and try to assign any blobs in the
            # currently analyzed frame to the 'lost' animal.
            # It's a for loop to make sure it scales easily
            # NOTE(review): each iteration indexes [0][0], i.e. always
            # the FIRST NaN column; this only advances if the
            # assignment fills that column with non-NaN values from
            # missing_animal - verify for >1 simultaneously lost animal.
            for m in range(len(np.argwhere(np.isnan(
                    previous_centroid_positions[0, :])))):
                # assign to previous centroid position search array
                # so that distance to the current centroid position can
                # be calculated. The idea really is just to catch
                # animal that re-appear. Shouldn't lead to a lot of
                # jumping around of the centroid
                previous_centroid_positions[:, np.argwhere(np.isnan(
                    previous_centroid_positions[0, :]))[0][0]] \
                    = missing_animal[:, np.argwhere(np.isnan(
                        previous_centroid_positions[0, :]))[0][0]]
            # reset the animal counter
            animals_counted = 0
            # go through all the blobs
            for j_blob in range(len(blobs)):
                # Use user provided rules to identify blobs that look like animals.
                if self.minimum_filled_area_number.get() \
                        < blobs[j_blob].filled_area \
                        < self.maximum_filled_area_number.get() \
                        and blobs[j_blob].minor_axis_length \
                        > 0 and\
                        blobs[j_blob].major_axis_length \
                        / blobs[j_blob].minor_axis_length \
                        > self.major_over_minor_axis_number.get() \
                        and blobs[j_blob].bbox[0] \
                        > self.ROI[-1][0] \
                        and blobs[j_blob].bbox[1] \
                        > self.ROI[-1][1] \
                        and blobs[j_blob].bbox[2] \
                        < self.ROI[-1][2] \
                        and blobs[j_blob].bbox[3] \
                        < self.ROI[-1][3]:
                    # switch that gets turned on if animal has been
                    # found - need one for each blob
                    found_animal = False
                    # for each blob that is accepted as an animal,
                    # calculate the distance to all blobs in previous
                    # frame - only keep the minimal distance and the
                    # animal that had the minimal distance. Number of
                    # position also indicates which position
                    for n_animal in range(previous_centroid_positions.shape[1]):
                        # Check if the animal index is already taken.
                        # If so, first come first serve, just skip
                        # that previous animal position! This solves
                        # the problem that when two animals crash it
                        # can happen that when they part again the
                        # minimal distance might be pointing to the
                        # same previously defined animal which leads
                        # to loss of one of the two animals. This if
                        # clause ensures that we never have two
                        # animals assigned to the the same animal
                        # index. It's better to have the NaN and
                        # interpolate afterwards
                        if n_animal not in animal_index:
                            # for readability, explicitly calculate
                            # the euclidean distance here:
                            current_dist = np.linalg.norm(
                                previous_centroid_positions[:, n_animal]
                                - np.asarray((blobs[j_blob].centroid[0],
                                              blobs[j_blob].centroid[1]
                                              ))
                            )
                            # as the minimal_dist array is filled
                            # with NaNs we can just check if this is
                            # the first time in this loop we are
                            # trying to assign a minimal distance to
                            # this animal
                            if np.isnan(minimal_dist[animals_counted]):
                                # only assign the minimal_dist if the
                                # distance is actually realistic: the
                                # allowed travel distance grows with
                                # the number of frames the animal has
                                # been lost
                                if current_dist \
                                        < max_speed_pixel \
                                        * number_of_frames_animal_lost[n_animal]:
                                    # minimal distance between current
                                    # blob centroid and previously
                                    # identified centroid
                                    minimal_dist[animals_counted] = current_dist
                                    # previous animal index
                                    animal_index[animals_counted] = n_animal
                                    # current animal as blob index
                                    blob_index[animals_counted] = j_blob
                                    # turn switch
                                    found_animal = True
                            else:
                                # only assign the minimal_dist if the minimal dist is actually realistic
                                if current_dist < max_speed_pixel * number_of_frames_animal_lost[n_animal]:
                                    # if minimal distance larger than distance to next previous animals
                                    if current_dist < minimal_dist[animals_counted]:
                                        # update the minimal distance to the smaller one
                                        minimal_dist[animals_counted] = current_dist
                                        # and of course also the index for both the previous animal
                                        animal_index[animals_counted] = n_animal
                                        # and where to find the current animal in the blob index
                                        blob_index[animals_counted] = j_blob
                    if found_animal:
                        animals_counted += 1
            # assign each animal to the closest previous animal -
            # in case an animal is lost/crashed it will just stay empty
            # as no new animal is closer than to another, still
            # existing animal.
            for n_animal_assignment in range(self.centroids.shape[1]):
                if not np.isnan(blob_index[n_animal_assignment]):
                    self.centroids[:, int(animal_index[n_animal_assignment]), i_frame] = \
                        int(blobs[int(blob_index[int(n_animal_assignment)])].centroid[0]), \
                        int(blobs[int(blob_index[int(n_animal_assignment)])].centroid[1]), 1
                    self.bounding_boxes[:, int(animal_index[n_animal_assignment]), i_frame] = \
                        int(blobs[int(blob_index[int(n_animal_assignment)])].bbox[0]), \
                        int(blobs[int(blob_index[int(n_animal_assignment)])].bbox[1]), \
                        int(blobs[int(blob_index[int(n_animal_assignment)])].bbox[2]), \
                        int(blobs[int(blob_index[int(n_animal_assignment)])].bbox[3])
                    # if that animal was lost before, reset counter to
                    # 1 as it was just found again!
                    if number_of_frames_animal_lost[int(animal_index[n_animal_assignment])] != 1:
                        number_of_frames_animal_lost[int(animal_index[n_animal_assignment])] = 1
            # Save the last known position of previously identified
            # animals that could not be assigned a blob this frame.
            # NOTE(review): this truthiness test only checks that at
            # least ONE animal was found this frame; if zero animals
            # were found, last-known positions are not recorded at all.
            # TODO confirm this is intended.
            if np.nansum(self.centroids[2, :, i_frame]):
                # for each of those:
                for m in range(len(np.argwhere(np.isnan(self.centroids[2, :, i_frame])))):
                    # only insert the nan in case it hasn't been a
                    # nan before. Essentially we want to save the
                    # last known position of the animal so that we
                    # can interpolate afterwards
                    if np.isnan(missing_animal[0, np.argwhere(np.isnan(self.centroids[2, :, i_frame]))[m][0]]):
                        if backwards:
                            missing_animal[:, np.argwhere(np.isnan(self.centroids[2, :, i_frame]))[m][0]] = \
                                self.centroids[0:2, np.argwhere(np.isnan(self.centroids[2, :, i_frame]))[m][0], i_frame + 1]
                        else:
                            missing_animal[:, np.argwhere(np.isnan(self.centroids[2, :, i_frame]))[m][0]] = \
                                self.centroids[0:2, np.argwhere(np.isnan(self.centroids[2, :, i_frame]))[m][0], i_frame - 1]
            # plotting
            # Allows user to change this bool even while analyzing -
            # much faster when not updating!
            if self.update_overview_bool:
                # hard copy necessary to update overview
                self.image_to_plot = self.smooth_images[:,:,i_frame].copy()
                # for each animal
                for i_plot in range(self.centroids.shape[1]):
                    # that could be identified
                    if not np.isnan(self.bounding_boxes[0, i_plot, i_frame]):
                        # add a bounding box - try except should only
                        # catch the case where animal is hugging the
                        # edge and bounding box would be outside
                        # of frame - shouldn't happen
                        try:
                            rr, cc = line(int(self.bounding_boxes[0,i_plot, i_frame]),
                                          int(self.bounding_boxes[1,i_plot, i_frame]),
                                          int(self.bounding_boxes[0,i_plot, i_frame]),
                                          int(self.bounding_boxes[3,i_plot, i_frame]))  # top horizontal
                            self.image_to_plot[rr, cc] = 0
                            rr, cc = line(int(self.bounding_boxes[0,i_plot, i_frame]),
                                          int(self.bounding_boxes[3,i_plot, i_frame]),
                                          int(self.bounding_boxes[2,i_plot, i_frame]),
                                          int(self.bounding_boxes[3,i_plot, i_frame]))  # right vertical
                            self.image_to_plot[rr, cc] = 0
                            rr, cc = line(int(self.bounding_boxes[2,i_plot, i_frame]),
                                          int(self.bounding_boxes[1,i_plot, i_frame]),
                                          int(self.bounding_boxes[2,i_plot, i_frame]),
                                          int(self.bounding_boxes[3,i_plot, i_frame]))  # bottom horizontal
                            self.image_to_plot[rr, cc] = 0
                            rr, cc = line(int(self.bounding_boxes[0,i_plot, i_frame]),
                                          int(self.bounding_boxes[1,i_plot, i_frame]),
                                          int(self.bounding_boxes[2,i_plot, i_frame]),
                                          int(self.bounding_boxes[1,i_plot, i_frame]))  # left vertical
                            self.image_to_plot[rr, cc] = 0
                        except IndexError:
                            print('while drawing the bounding box had'
                                  ' index error, likely action at the '
                                  'edge of the arena')
                # manual updating of plot to gain speed
                self.image_of_background.set_data(self.image_to_plot)
                self.canvas_overview.restore_region(self.canvas_overview_background)
                self.ax_overview.draw_artist(self.image_of_background)
                # draw the ROI indicator
                self.ax_overview.add_patch(self.rect)
                self.rect.set_width(self.ROI[-1][3] - self.ROI[-1][1])
                self.rect.set_height(self.ROI[-1][2] - self.ROI[-1][0])
                self.rect.set_xy((self.ROI[-1][1], self.ROI[-1][0]))
                self.ax_overview.draw_artist(self.rect)
                # after animals have been identified let user scroll
                # through the experiment. Remove text labels of the
                # previous frame first.
                for i in range(len(self.text_artists)):
                    self.text_artists[i].remove()
                self.text_artists = []
                # label the bounding boxes with the index of the
                # array, which of course is the identity of the animals
                # at this point
                i_text_counter = 0
                for i_text in range(self.centroids.shape[1]):
                    # make sure there's no NaN
                    if not np.isnan(self.centroids[0,i_text,i_frame]):
                        self.text_artists.append(
                            self.ax_overview.text(int(
                                self.bounding_boxes[3, i_text,i_frame]),
                                int(
                                    self.bounding_boxes[
                                        0, i_text, i_frame]),
                                repr(i_text)))
                        try:
                            self.ax_overview.draw_artist(
                                self.text_artists[i_text_counter])
                        # Error happens if user zooms in an area
                        # where this text box does not need to be drawn
                        except IndexError:
                            pass
                        # only increment text_counter if not nan!
                        i_text_counter += 1
                self.canvas_overview.blit(self.ax_overview.bbox)
            # always update frame indicator on the blobplot
            self.blob_plot_indicator.set_ydata([
                self.image_number.get(), self.image_number.get()])
            self.canvas_blob_plot.draw()
            self.child.update()

        start_frame = self.image_number.get()
        # per-animal counter of consecutive frames an animal has been
        # lost; scales the plausible travel distance in tracking_loop
        number_of_frames_animal_lost = np.ones((blob_counter))
        # maximum plausible travel distance per frame in pixels;
        # 2.5 is a safety factor
        if self.pixel_per_mm is None or self.pixel_per_mm == 0:
            max_speed_pixel = (self.max_speed_number.get() /
                               self.recording_framerate * 2.5)
        else:
            max_speed_pixel = (self.max_speed_number.get() *
                               self.pixel_per_mm)/\
                              self.recording_framerate * 2.5
        # walk backwards in time from the user-selected frame...
        for i_frame in reversed(range(start_frame)):
            self.image_number.set(i_frame)
            tracking_loop(backwards=True)
        # ...then forward to the end of the recording
        for i_frame in range(start_frame, self.centroids.shape[2]):
            self.image_number.set(i_frame)
            tracking_loop(backwards=False)
        self.interpolate_button.config(state="normal")
        # plot the overview: background image with one scatter trace
        # per animal, saved to disk
        self.cmap = self.get_cmap(self.centroids.shape[1]+1)
        fig = Figure(figsize=(13,10))
        canvas = FigureCanvas(fig)
        ax = fig.add_subplot(111)
        ax.imshow(self.background, cmap='Greys_r')
        for i in range(self.centroids.shape[1]):
            ax.scatter(x=self.centroids[1, i, ~np.isnan(self.centroids[1, i, :])],
                       y=self.centroids[0, i, ~np.isnan(
                           self.centroids[1, i, :])],
                       c=np.array(self.cmap(i)).reshape(1,4))
        fig.tight_layout()
        # NOTE(review): 'outpout' typo kept - downstream tools may
        # rely on this exact filename
        canvas.print_figure('Multi_animal_outpout_wo_interpolation.jpg')
        # write the raw (non-interpolated) trajectories to csv
        csv_object = pd.DataFrame({'Frame' : np.arange(0, self.centroids.shape[2])})
        if self.timestamps is not None:
            # time in timestamp is saved as epoch time - to get time
            # since beginning of experiment just subtract
            # the first value from every value
            csv_object['Time'] = self.timestamps[:,1]-self.timestamps[0,1]
        messagebox.showinfo('Tracking Done',
                            'Finished Tracking.'
                            '\n\n'
                            'Find the "XY_position.csv" file in the'
                            '\n experimental folder')
        for i in range(self.centroids.shape[1]):
            csv_object['X-Centroid, Animal#' + repr(i)] = self.centroids[1, i, :]
            csv_object['Y-Centroid, Animal#' + repr(i)] = self.centroids[0, i, :]
        # NOTE(review): filename 'XY_postion.csv' (typo) does not match
        # the 'XY_position.csv' promised in the messagebox above -
        # TODO confirm which one downstream consumers expect
        csv_object.to_csv('XY_postion.csv', sep=',')
        self.show_tracking_result()
[docs] def interpolate(self):
"""
During tracking it can happen that animals are not identified
in every frame.
This function allows to interpolate the trajectories.
.. warning::
This is an experimental feature. It can produce very wrong
results
For each identified animal there is "last frame" where it has
been identified and a "new frame" where it is identified
again. This function assumes that the animal moved with a
constant speed and in linear fashion and just does a linear
interpolation between these coordinates.
.. important::
An important assumption is that the initial assignment was
relatively correct. Small errors can lead to huge effects
when using the interpolation function
"""
csv_object = None
csv_object = pd.DataFrame({'Frame' : np.arange(0, self.centroids.shape[2])})
for i in range(self.centroids.shape[1]):
csv_object['X-Centroid, Animal#' + repr(i)] = self.centroids[1, i, :]
csv_object['Y-Centroid, Animal#' + repr(i)] = self.centroids[0, i, :]
try:
# Not totally sure if pandas is rounding before converting to int.
csv_object.interpolate().astype(int).to_csv('XY_position_interpolated.csv', sep=',')
except ValueError:
csv_object.interpolate().to_csv('XY_position_interpolated_non_int.csv', sep=',')
self.image_to_plot = self.smooth_images[:, :, -1].copy()
self.image_of_background.set_data(self.image_to_plot)
self.canvas_overview.restore_region(self.canvas_overview_background)
self.ax_overview.draw_artist(self.image_of_background)
# draw the ROI indicator
self.ax_overview.add_patch(self.rect)
self.rect.set_width(self.ROI[-1][3] - self.ROI[-1][1])
self.rect.set_height(self.ROI[-1][2] - self.ROI[-1][0])
self.rect.set_xy((self.ROI[-1][1], self.ROI[-1][0]))
self.ax_overview.draw_artist(self.rect)
# draw the scatterplot indicating the position of the animals
try:
self.scat_artists.remove()
self.scat_artists = []
except TypeError:
pass
for i in range(self.centroids.shape[1]):
try:
self.scat_artists.append(
self.ax_overview.scatter(
x=csv_object['X-Centroid, Animal#' +
repr(i)].interpolate().astype(int),
y=csv_object['Y-Centroid, Animal#' + repr(
i)].interpolate().astype(int),
c=np.array(self.cmap(i)).reshape(1,4)))
except ValueError:
self.scat_artists.append(
self.ax_overview.scatter(
x = csv_object[pd.notnull(
csv_object['X-Centroid, Animal#'
+repr(i)])]['X-Centroid, Animal#'
+ repr(i)],
y = csv_object[pd.notnull(
csv_object['Y-Centroid, Animal#'
+repr(i)])]['Y-Centroid, Animal#'
+ repr(i)]
))
messagebox.showwarning(
'Animal not identified',
'At least one animal has not been assigned centroid'
'\npositions at either the first or last couple of frames')
self.ax_overview.draw_artist(self.scat_artists[i])
self.canvas_overview.blit(self.ax_overview.bbox)
self.child.update()
def show_tracking_result(self):
# Call the plot that shows the traces after plotting
print('plotting in main window')
self.image_to_plot = self.smooth_images[:, :, -1].copy()
self.image_of_background.set_data(self.image_to_plot)
self.canvas_overview.restore_region(self.canvas_overview_background)
self.ax_overview.draw_artist(self.image_of_background)
# draw the ROI indicator
self.ax_overview.add_patch(self.rect)
self.rect.set_width(self.ROI[-1][3] - self.ROI[-1][1])
self.rect.set_height(self.ROI[-1][2] - self.ROI[-1][0])
self.rect.set_xy((self.ROI[-1][1], self.ROI[-1][0]))
self.ax_overview.draw_artist(self.rect)
# draw the scatterplot indicating the
for i in range(self.centroids.shape[1]):
scat_artist = self.ax_overview.scatter(
x=self.centroids[1, i, ~np.isnan(self.centroids[1, i, :])],
y=self.centroids[0, i, ~np.isnan(self.centroids[1, i, :])],
c=np.array(self.cmap(i)).reshape(1,4))
self.ax_overview.draw_artist(scat_artist)
self.canvas_overview.blit(self.ax_overview.bbox)
self.child.update()
    def get_cmap(self, n, name='hsv'):
        """
        Return a function that maps each index in 0, 1, ..., n-1 to a
        distinct RGBA color.

        :param n: number of distinct colors the colormap should hold
        :param name: a standard matplotlib colormap name
        :return: a matplotlib colormap callable
        """
        return cm.get_cmap(name, n)