__author__ = 'David Tadres'
__project__ = 'PiVR'
import json
import operator
import os
import sys
import tkinter as tk

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from skimage.draw import line
try:
import cv2
except ModuleNotFoundError:
pass # No need to do anything as the function is never called if
# cv2 is not installed
class FindROI:
    """
    This class defines the region of interest (ROI) around the centroid
    of a given regionprops object.
    It also makes sure that the box never extends outside of the frame.
    """
    def __init__(self, regionproperties, boxsize, size_factor, image):
self.regionproperties = regionproperties
self.boxsize = boxsize
self.size_factor = size_factor
self.image = image
self.row_min = 0
self.row_max = 0
self.col_min = 0
self.col_max = 0
self.centroid_row = 0
self.centroid_col = 0
self.assign()
def assign(self):
self.row_min = self.regionproperties.centroid[0] \
- self.boxsize * self.size_factor
self.row_max = self.regionproperties.centroid[0] \
+ self.boxsize * self.size_factor
self.col_min = self.regionproperties.centroid[1] \
- self.boxsize * self.size_factor
self.col_max = self.regionproperties.centroid[1] \
+ self.boxsize * self.size_factor
if self.row_min < 0:
self.row_min = 0
if self.col_min < 0:
self.col_min = 0
if self.row_max > self.image.shape[0]:
self.row_max = self.image.shape[0] - 1
if self.col_max > self.image.shape[1]:
self.col_max = self.image.shape[1] - 1
self.centroid_row = self.regionproperties.centroid[0]
self.centroid_col = self.regionproperties.centroid[1]
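
# A minimal usage sketch for FindROI (illustrative only: `binary_image`
# and `frame` are hypothetical names, not part of this module):
#
#     from skimage import measure
#     props = measure.regionprops(measure.label(binary_image))
#     roi = FindROI(props[0], boxsize=10, size_factor=1, image=frame)
#     # roi.row_min/row_max/col_min/col_max are clamped to the frame
#     cutout = frame[int(roi.row_min):int(roi.row_max),
#                    int(roi.col_min):int(roi.col_max)]
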
class MeanThresh():
    """
    This class takes an image and calculates the mean intensity and
    standard deviation to derive a threshold which can be used to
    segment the image.
    If no roi is given, the whole image is taken into account.
    roi must be a roi class object.
    """
    def __init__(self, image, signal, sigma, roi=None, invert=False):
self.thresh = 0
self.image = image
        if roi is not None:
self.row_min = roi.row_min
self.row_max = roi.row_max
self.col_min = roi.col_min
self.col_max = roi.col_max
else:
self.row_min = 0
self.row_max = image.shape[0]
self.col_min = 0
            self.col_max = image.shape[1]
if signal == 'white':
if invert:
self.operation = operator.sub
else:
self.operation = operator.add
elif signal == 'dark':
if invert:
self.operation = operator.add
else:
self.operation = operator.sub
        else:
            print('You have to set signal to either "white" or "dark" '
                  'to continue')
            print('Exiting program')
            sys.exit()
self.sigma = sigma
self.calculate_threshold()
    def calculate_threshold(self):
        """
        Calculate the threshold by adding ('white' signal) or
        subtracting ('dark' signal) sigma (provided when the class is
        called) times the standard deviation of the pixel intensities
        in the ROI to/from their mean. The 'invert' flag flips the
        operation.
        """
self.thresh = \
self.operation(
np.nanmean(
self.image[
int(self.row_min):int(self.row_max),
int(self.col_min):int(self.col_max)
]
),
self.sigma * np.nanstd(
self.image[
int(self.row_min):int(self.row_max),
int(self.col_min):int(self.col_max)
]
)
)
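
# In other words, for a 'white' (bright) animal the threshold is
# mean + sigma * std of the ROI pixels, and for a 'dark' animal it is
# mean - sigma * std. A sketch of an equivalent calculation, assuming
# `img` is a 2D numpy array and `r0, r1, c0, c1` are ROI bounds
# (hypothetical names, not from this module):
#
#     patch = img[int(r0):int(r1), int(c0):int(c1)]
#     thresh_white = np.nanmean(patch) + sigma * np.nanstd(patch)
#     thresh_dark = np.nanmean(patch) - sigma * np.nanstd(patch)
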
class CallImageROI():
    """
    This class consolidates the different ways of extracting the ROI
    of the animal from an image into a single class.
    Different frames of reference are used in the detection and
    tracking algorithms: the absolute pixel coordinates and the
    search_box.
    In different parts of the code, different frames of reference are
    used to call the image ROI:

    #. If only the image and the roi are given, the roi
       coordinates are given in the absolute frame of reference (
       the 640x480 pixels of the image).

    #. If the boxsize parameter is given, the input image is
       **not** the full image. Instead, it is only the image of
       the search box, defined by the boxsize parameter! This is
       for example called in
       :func:`pre_experiment.FindAnimal.animal_after_box_mode_three`

    #. If the sliced_input_image parameter is given, the input
       image is **not** the full image. Instead, it is only the
       image of the search box. In
       :func:`fast_tracking.ReallyFastTracking.animal_tracking`
       the algorithm only looks for the animal in a region
       defined by search_boxes. Providing sliced_input_image
       takes this into account.

    In order to keep the code as tidy as possible, this class helps
    calling the ROI using a roi object.
    """
    def __init__(self, image, roi, boxsize=None,
                 sliced_input_image=None):
        self.image = image
        self.row_min = roi.row_min
        self.row_max = roi.row_max
        self.col_min = roi.col_min
        self.col_max = roi.col_max
        self.boxsize = boxsize
        if sliced_input_image is not None:
            self.sliced_input_image = True
            self.subtract_row = sliced_input_image[0]
            self.subtract_col = sliced_input_image[1]
        else:
            self.sliced_input_image = None
        self.call_image()
self.call_image()
    def call_image(self):
"""
Depending on the input (full image, only search box) the
ROI of the image is extracted.
"""
if self.boxsize is not None:
self.small_image = self.image[
int(self.row_min - self.boxsize):int(self.row_max + self.boxsize),
int(self.col_min - self.boxsize):int(self.col_max + self.boxsize)]
elif self.sliced_input_image is not None:
self.small_image = self.image[
int(self.row_min - self.subtract_row):int(self.row_max - self.subtract_row),
int(self.col_min - self.subtract_col):int(self.col_max - self.subtract_col)]
else:
self.small_image = self.image[
int(self.row_min):int(self.row_max),
int(self.col_min):int(self.col_max)]
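
# A sketch of the three calling conventions described in the class
# docstring (illustrative only: `frame`, `box_img` and `roi` are
# hypothetical names):
#
#     CallImageROI(frame, roi)                          # absolute coordinates
#     CallImageROI(box_img, roi, boxsize=10)            # search-box image
#     CallImageROI(box_img, roi,
#                  sliced_input_image=(row0, col0))     # shifted coordinates
#
# In each case the extracted region is available as `.small_image`.
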
class CallBoundingBox():
    """
    This class is heavily used in the
    :func:`fast_tracking.ReallyFastTracking.animal_tracking` function!
    It takes the full image and the search_box coordinates and returns
    only the search_box (or ROI) of the image.
    """
def __init__(self, image, bounding_box):
self.image = image
self.row_min = bounding_box[0]
self.row_max = bounding_box[1]
self.col_min = bounding_box[2]
self.col_max = bounding_box[3]
self.slice_image()
def slice_image(self):
self.sliced_image = self.image[
int(self.row_min):int(self.row_max),
int(self.col_min):int(self.col_max)]
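
# Usage sketch (illustrative only: `frame` is a hypothetical 2D numpy
# array; the bounding box is ordered row_min, row_max, col_min, col_max):
#
#     box = CallBoundingBox(frame, bounding_box=(r0, r1, c0, c1))
#     search_box = box.sliced_image   # frame[r0:r1, c0:c1]
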
class DescribeLargestObject():
    """
    This class takes a skimage regionprops object (
    https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops).
    These regionprops objects contain a list of labelled image regions.
    If "animal_like" is False, the largest labelled image region (
    defined by filled_area) is defined as the animal.
    If "animal_like" is True, each labelled image region is checked
    against the following parameters taken from
    "available_organisms.json":

    #. A certain range of filled area

    #. A certain ratio of the major axis over the minor axis

    #. A certain range of eccentricity

    This class analyzes a binary image, defines the largest object
    and saves its bounding box, its major and minor axes and its
    centroid coordinates.
    """
    def __init__(self, regionproperties, roi, boxsize=None,
                 animal_like=False, filled_area_min=None,
                 filled_area_max=None, eccentricity_min=None,
                 eccentricity_max=None,
                 major_over_minor_axis_min=None,
                 major_over_minor_axis_max=None
                 ):
self.filled_area = 0
self.eccentricity = 0
self.row_min = 0
self.row_max = 0
self.col_min = 0
self.col_max = 0
self.centroid_row = 0
self.centroid_col = 0
self.major_axis = 0
self.minor_axis = 0
        self.boxsize = boxsize if boxsize is not None else 0
        self.regionproperties = regionproperties
try: # if roi is an roi object (pre_experiment)
self.row_min_internal = roi.row_min
self.col_min_internal = roi.col_min
except AttributeError: # else if roi is a list
self.row_min_internal = roi[0]
self.col_min_internal = roi[1]
# boolean switch that will turn as soon as something like one
# of the if clauses that defines the animal has
# been triggered at least once
self.animal_like_object_detected = False
if animal_like:
self.filled_area_min = filled_area_min
self.filled_area_max = filled_area_max
self.eccentricity_min = eccentricity_min
self.eccentricity_max = eccentricity_max
self.major_over_minor_axis_min = major_over_minor_axis_min
self.major_over_minor_axis_max = major_over_minor_axis_max
self.animal_like_object()
else:
self.largest_object()
    def largest_object(self):
        """
        This function simply defines the largest labelled image
        region as the animal.
        """
datatype = [('filled area', np.uint16),
('row min', np.uint16),
('col min', np.uint16),
('row max', np.uint16),
('col max', np.uint16),
('centroid row', np.uint16),
('centroid col', np.uint16),
('minor axis', np.float32),
('major axis', np.float32),
('eccentricity',np.float32)]
areas = np.zeros((len(self.regionproperties)), dtype=datatype)
for j_areas in range(len(self.regionproperties)):
areas['filled area'][j_areas] = \
self.regionproperties[j_areas].filled_area
# To get the "real" coordinate (relative to the original
# picture) and not just the coordinates of the
# small picture the minimal row/col needs to be added to
# the found box
areas['row min'][j_areas] = \
self.row_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[0]
areas['col min'][j_areas] = \
self.col_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[1]
areas['row max'][j_areas] = \
self.row_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[2]
areas['col max'][j_areas] = \
self.col_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[3]
areas['centroid row'][j_areas] = \
self.row_min_internal - self.boxsize + self.regionproperties[j_areas].centroid[0]
            areas['centroid col'][j_areas] = \
                self.col_min_internal - self.boxsize + self.regionproperties[j_areas].centroid[1]
areas['minor axis'][j_areas] = \
self.regionproperties[j_areas].minor_axis_length
areas['major axis'][j_areas] = \
self.regionproperties[j_areas].major_axis_length
areas['eccentricity'][j_areas] = \
self.regionproperties[j_areas].eccentricity
# After concluding the for loop that cycles through all the
# regionprops, sort them by the size of their filled area
areas_sorted = np.sort(areas, order='filled area')
# And just take the **largest** blob!
self.filled_area = areas_sorted['filled area'][-1]
self.row_min = areas_sorted['row min'][-1]
self.row_max = areas_sorted['row max'][-1]
self.col_min = areas_sorted['col min'][-1]
self.col_max = areas_sorted['col max'][-1]
self.centroid_row = areas_sorted['centroid row'][-1]
self.centroid_col = areas_sorted['centroid col'][-1]
self.minor_axis = areas_sorted['minor axis'][-1]
self.major_axis = areas_sorted['major axis'][-1]
self.eccentricity = areas_sorted['eccentricity'][-1]
    def animal_like_object(self):
        """
        This function tests each labelled image region for "animal
        likeness".
        The largest of these labelled image regions is defined as the
        animal.
        """
datatype = [('filled area', np.uint16),
('row min', np.uint16),
('col min', np.uint16),
('row max', np.uint16),
('col max', np.uint16),
('centroid row', np.uint16),
('centroid col', np.uint16),
('minor axis', np.float32),
('major axis', np.float32),
('eccentricity', np.float32)]
areas = np.zeros((len(self.regionproperties)), dtype=datatype)
for j_areas in range(len(self.regionproperties)):
if self.filled_area_min \
< self.regionproperties[j_areas].filled_area \
< self.filled_area_max \
and self.eccentricity_min \
< self.regionproperties[j_areas].eccentricity \
< self.eccentricity_max \
and self.major_over_minor_axis_min \
< self.regionproperties[j_areas].major_axis_length \
/ self.regionproperties[j_areas].minor_axis_length \
< self.major_over_minor_axis_max:
print('found a blob that could be an animal')
areas['filled area'][j_areas] = \
self.regionproperties[j_areas].filled_area
# To get the "real" coordinate (relative to the
# original picture) and not just the coordinates
# of the small picture the minimal row/col needs
# to be added to the found box
areas['row min'][j_areas] = \
self.row_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[0]
areas['col min'][j_areas] = \
self.col_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[1]
areas['row max'][j_areas] = \
self.row_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[2]
areas['col max'][j_areas] = \
self.col_min_internal - self.boxsize + self.regionproperties[j_areas].bbox[3]
areas['centroid row'][j_areas] = \
self.row_min_internal - self.boxsize + self.regionproperties[j_areas].centroid[0]
areas['centroid col'][j_areas] = \
self.col_min_internal - self.boxsize + self.regionproperties[j_areas].centroid[1]
areas['minor axis'][j_areas] = \
self.regionproperties[j_areas].minor_axis_length
areas['major axis'][j_areas] = \
self.regionproperties[j_areas].major_axis_length
areas['eccentricity'][j_areas] = \
self.regionproperties[j_areas].eccentricity
self.animal_like_object_detected = True
# After selecting for animal-like shape, sort to get the
# largest of the animal like shapes and define it as the
# animal.
areas_sorted = np.sort(areas, order='filled area')
self.filled_area = areas_sorted['filled area'][-1]
self.row_min = areas_sorted['row min'][-1]
self.row_max = areas_sorted['row max'][-1]
self.col_min = areas_sorted['col min'][-1]
self.col_max = areas_sorted['col max'][-1]
self.centroid_row = areas_sorted['centroid row'][-1]
self.centroid_col = areas_sorted['centroid col'][-1]
self.minor_axis = areas_sorted['minor axis'][-1]
self.major_axis = areas_sorted['major axis'][-1]
self.eccentricity = areas_sorted['eccentricity'][-1]
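
# Usage sketch for the "animal_like" mode (illustrative only: the
# parameter values shown are placeholders; the real values come from
# "available_organisms.json"):
#
#     obj = DescribeLargestObject(props, roi, boxsize=10,
#                                 animal_like=True,
#                                 filled_area_min=50, filled_area_max=500,
#                                 eccentricity_min=0.3, eccentricity_max=0.99,
#                                 major_over_minor_axis_min=1.2,
#                                 major_over_minor_axis_max=5.0)
#     if obj.animal_like_object_detected:
#         print(obj.centroid_row, obj.centroid_col, obj.filled_area)
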
class DrawBoundingBox():
    """
    Used only during debug mode, when the user can watch the tracking
    algorithm in action.
    Indicates the ROI (search box) where the algorithm has detected
    the animal.
    """
def __init__(self, image, roi, value):
self.image = image
self.image_with_box = None
self.row_min = roi.row_min
self.row_max = roi.row_max
self.col_min = roi.col_min
self.col_max = roi.col_max
self.value = value
self.draw_box()
    def draw_box(self):
"""
Draws the bounding box directly into the numpy array
"""
self.image_with_box = self.image.copy()
# top horizontal
rr, cc = line(int(self.row_min), int(self.col_min),
int(self.row_min), int(self.col_max))
self.image_with_box[rr, cc] = self.value
        # right vertical
        rr, cc = line(int(self.row_min), int(self.col_max),
                      int(self.row_max), int(self.col_max))
        self.image_with_box[rr, cc] = self.value
        # bottom horizontal
        rr, cc = line(int(self.row_max), int(self.col_min),
                      int(self.row_max), int(self.col_max))
        self.image_with_box[rr, cc] = self.value
# left vertical
rr, cc = line(int(self.row_min), int(self.col_min),
int(self.row_max), int(self.col_min))
self.image_with_box[rr, cc] = self.value
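
# Usage sketch (illustrative only: `frame` and `roi` are hypothetical):
#
#     boxed = DrawBoundingBox(frame, roi, value=255).image_with_box
#     # `boxed` is a copy of `frame` with the four box edges set to 255
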
class Save():
    """
    Used to save experimental data after the experiment is concluded,
    both when the experiment finished as expected and when an error
    occurred and the experiment crashed abruptly.
    """
def __init__(self,
heads,
tails,
centroids,
image_skel,
image_raw,
image_thresh,
background,
local_threshold,
real_time,
pixel_per_mm,
bounding_boxes,
midpoints,
                 stimulation=None,
arena=None,
heuristic_data=None,
datetime=None,
time_delay_due_to_animal_detection=0,
loop_time=None,
recording_time=None,
framerate=None,
time_dep_stim_file=None,
                 high_power_led_bool=False,
pwm_range=100,
save_centroids_npy=False,
save_heads_npy=False,
save_tails_npy=False,
save_midpoints_npy=False,
save_bbox_npy=False,
save_stim_npy=False,
save_thresh_npy=False,
save_skeleton_npy=False,
undistort_dst=None,
undistort_mtx=None,
newcameramtx=None
):
self.heads = heads
self.tails = tails
self.centroids = centroids
self.image_skel = image_skel
self.image_raw = image_raw
self.image_thresh = image_thresh
self.local_threshold = local_threshold
self.smoothed_background = background
self.real_time = real_time # comes in us!
self.pixel_per_mm = pixel_per_mm
self.bounding_boxes = bounding_boxes
self.save_centroids_npy = save_centroids_npy
self.save_heads_npy = save_heads_npy
self.save_tails_npy = save_tails_npy
self.save_midpoints_npy = save_midpoints_npy
self.save_bbox_npy = save_bbox_npy
self.save_stim_npy = save_stim_npy
self.save_thresh_npy = save_thresh_npy
self.save_skeleton_npy = save_skeleton_npy
self.undistort_dst = undistort_dst
self.undistort_mtx = undistort_mtx
self.newcameramtx = newcameramtx
if stimulation is not None:
self.stimulation = stimulation
if high_power_led_bool: # This corrects for the inversion of stim numbers
# in the HP setups
self.stimulation = pwm_range - self.stimulation
else:
self.stimulation = np.zeros((self.heads.shape[0]))
self.arena = arena
self.heuristic_data = heuristic_data
self.datetime = datetime
self.midpoints = midpoints
self.time_delay_due_to_animal_detection = \
time_delay_due_to_animal_detection
        # change real_time from us to s for ease of readability
self.real_time /= 1e6
self.loop_time = loop_time
self.recording_time = recording_time
self.recording_framerate = framerate
self.time_dep_stim_file = time_dep_stim_file
self.high_power_led_bool = high_power_led_bool
self.pwm_range = pwm_range
# IF the experiment is run on a HP setup the stimulus is
# inverted: 100 means the light is OFF whereas 0 means the
# light is 100% on. To make sure the presentation during the
# experiment is correct all values of the time_dep_stim_file
# are inverted. As this is confusing for the data analysis (
# 100 goes into the stimulation file while 0 comes out) it
# needs to be inverted again. This happens here:
if self.high_power_led_bool and self.time_dep_stim_file is not None:
self.time_dep_stim_file = self.pwm_range - \
self.time_dep_stim_file
self.save_n_plot()
    def save_n_plot(self):
        """
        Saves the requested tracking arrays, writes the CSV data files
        and plots an overview figure of the tracking.
        """
        print('saving')
if self.save_heads_npy:
np.save('heads.npy', self.heads)
if self.save_tails_npy:
np.save('tails.npy', self.tails)
if self.save_centroids_npy:
np.save('centroids.npy', self.centroids)
if self.save_midpoints_npy:
np.save('midpoints.npy', self.midpoints)
if self.save_bbox_npy:
np.save('bounding_boxes.npy', self.bounding_boxes)
if self.loop_time is not None:
np.save('loop_time.npy', self.loop_time)
if self.save_skeleton_npy:
np.save('sm_skeletons.npy', self.image_skel)
if self.save_thresh_npy:
np.save('sm_thresh.npy', self.image_thresh)
np.save('sm_raw.npy', self.image_raw)
if self.save_stim_npy:
# only save the stimulation array if at least one value of
# the stimulation array is non-zero
if len(np.nonzero(self.stimulation)[0]) > 0:
np.save('stimulation.npy', self.stimulation)
if self.newcameramtx is not None:
self.smoothed_background = cv2.undistort(
self.smoothed_background,
self.undistort_mtx,
self.undistort_dst,
None,
self.newcameramtx)
if self.undistort_mtx is not None:
# also save the undistort mtx and dist matrices. It takes
# almost no space (a kb or so) and is very convenient to have
np.savez('undistort_matrices.npz', **{'mtx': self.undistort_mtx,
'dist': self.undistort_dst})
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(111)
ax.axis('off')
ax.imshow(self.smoothed_background, cmap='Greys_r') #, origin='lower') # todo - check if this is correct (different from thc.save!)
ax.scatter(y=self.centroids[0:-1, 0], x=self.centroids[0:-1, 1], color='k',
label='Centroid', s=1, alpha=0.5)
ax.scatter(y=self.heads[0:-1, 0], x=self.heads[0:-1, 1], color='r', label='Heads', s=1, alpha=0.5)
        try:
            if self.stimulation[0] != 0:
                # I think this works, but it's ok to keep the blank
                # exception as we must not have an error at this stage
                # since it would hinder saving the data!
                try:
                    ax.imshow(self.arena, alpha=0.5, cmap='hot_r')
                except:
                    pass
        except ValueError:
            # This will fail in case of a time dependent stimulus,
            # therefore try..except
            pass
ax.legend()
fig.tight_layout()
fig.savefig('Overview of tracking.png')
if self.arena is not None:
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
            x = np.arange(0, self.recording_time, 1 / self.recording_framerate)
            ax.plot(x, self.stimulation)
            ax.set_ylabel('Stimulation [% of abs max]')
            ax.set_xlabel('Time [s]')
            fig.savefig('VR Stimulation over time.png')
if self.time_dep_stim_file is None:
print('time_dep_stim_file is None')
csv_object = pd.DataFrame({'Frame': np.arange(0, self.centroids.shape[0]),
'Time': self.real_time - self.real_time[0],
'X-Centroid': self.centroids[:, 1],
'Y-Centroid': self.centroids[:, 0],
'X-Head': self.heads[:, 1],
'Y-Head': self.heads[:, 0],
'X-Tail': self.tails[:, 1],
'Y-Tail': self.tails[:, 0],
'X-Midpoint': self.midpoints[:, 1],
'Y-Midpoint': self.midpoints[:, 0],
'Ymin-bbox': self.bounding_boxes[0,:],
'Ymax-bbox': self.bounding_boxes[1,:],
'Xmin-bbox': self.bounding_boxes[2,:],
'Xmax-bbox': self.bounding_boxes[3,:],
'local threshold': self.local_threshold,
'stimulation': self.stimulation,
})
# Originally, we had missing values set to zero but it turns
# out it is better to set it to nan. Do that here
csv_object.loc[csv_object.Time < 0, ['Time',
'X-Centroid',
'Y-Centroid',
'X-Head',
'Y-Head',
'X-Tail',
'Y-Tail',
'X-Midpoint',
'Y-Midpoint',
'Ymin-bbox',
'Ymax-bbox',
'Xmin-bbox',
'Xmax-bbox',
'local threshold',
'stimulation']] = np.nan
csv_object.to_csv(os.getcwd() + '/' + self.datetime + '_data.csv', sep=',', \
columns=['Frame',
'Time',
'X-Centroid',
'Y-Centroid',
'X-Head',
'Y-Head',
'X-Tail',
'Y-Tail',
'X-Midpoint',
'Y-Midpoint',
'Ymin-bbox',
'Ymax-bbox',
'Xmin-bbox',
'Xmax-bbox',
'local threshold',
'stimulation'
],
na_rep='NaN')
else:
print('time_dep_stim_file is not None')
csv_object = pd.DataFrame({'Frame': np.arange(0, self.centroids.shape[0]),
'Time': self.real_time - self.real_time[0],
'X-Centroid': self.centroids[:, 1],
'Y-Centroid': self.centroids[:, 0],
'X-Head': self.heads[:, 1],
'Y-Head': self.heads[:, 0],
'X-Tail': self.tails[:, 1],
'Y-Tail': self.tails[:, 0],
'X-Midpoint': self.midpoints[:, 1],
'Y-Midpoint': self.midpoints[:, 0],
'Ymin-bbox': self.bounding_boxes[0, :],
'Ymax-bbox': self.bounding_boxes[1, :],
'Xmin-bbox': self.bounding_boxes[2, :],
'Xmax-bbox': self.bounding_boxes[3, :],
'local threshold': self.local_threshold,
# we're now saving the actually presented value
# This makes sure timestamps are as expected.
'Stim Ch1': self.stimulation[:,0],
'Stim Ch2': self.stimulation[:,1],
'Stim Ch3': self.stimulation[:,2],
'Stim Ch4': self.stimulation[:,3]
})
# Originally, we had missing values set to zero but it turns
# out it is better to set it to nan. Do that here
csv_object.loc[csv_object.Time < 0, ['Time',
'X-Centroid',
'Y-Centroid',
'X-Head',
'Y-Head',
'X-Tail',
'Y-Tail',
'X-Midpoint',
'Y-Midpoint',
'Ymin-bbox',
'Ymax-bbox',
'Xmin-bbox',
'Xmax-bbox',
'local threshold',
'Stim Ch1',
'Stim Ch2',
'Stim Ch3',
'Stim Ch4']] = np.nan
csv_object.to_csv(os.getcwd() + '/' + self.datetime + '_data.csv', sep=',', \
columns=['Frame',
'Time',
'X-Centroid',
'Y-Centroid',
'X-Head',
'Y-Head',
'X-Tail',
'Y-Tail',
'X-Midpoint',
'Y-Midpoint',
'Ymin-bbox',
'Ymax-bbox',
'Xmin-bbox',
'Xmax-bbox',
'local threshold',
'Stim Ch1',
'Stim Ch2',
'Stim Ch3',
'Stim Ch4'
],
na_rep='NaN')
        print('saved in ' + os.getcwd() + '/' + self.datetime
              + '_data.csv')
if self.heuristic_data is not None:
csv_object_heuristics = pd.DataFrame({
'filled area in pixel': self.heuristic_data[0,:],
'filled area divided by px/mm': self.heuristic_data[1,:],
'major over minor axis': self.heuristic_data[2,:],
'eccentricity': self.heuristic_data[3,:],
'skeleton length in px': self.heuristic_data[4,:],
'skeleton length in mm': self.heuristic_data[5,:],
'speed in pixel per frame': self.heuristic_data[6,:],
'speed in mm per frame': self.heuristic_data[7,:],
'speed in mm per second' : self.heuristic_data[8,:]
})
csv_object_heuristics.to_csv(os.getcwd() + '/'
+ self.datetime
+ '_heuristics.csv', sep=',',
columns=['filled area in pixel',
'filled area divided by px/mm',
'major over minor axis',
'eccentricity',
'skeleton length in px',
'skeleton length in mm',
'speed in pixel per frame',
'speed in mm per frame',
'speed in mm per second'])
# read experiment settings
with open('experiment_settings.json', 'r') as file:
experiment_info = json.load(file)
experiment_info['Time delay due to Animal Detection[s]'] = \
self.time_delay_due_to_animal_detection
# save (overwrite) the experiment_settings file again
with open(('experiment_settings.json'), 'w') as file:
json.dump(experiment_info, file, sort_keys=True, indent=4)
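
# Note on the high power LED inversion handled in Save.__init__ above:
# on HP setups a PWM value of pwm_range means "light OFF" and 0 means
# "fully ON", so stimulation values are inverted once for presentation
# and inverted back before saving. A sketch of the round trip
# (illustrative values only):
#
#     requested = 100                      # user asks for 100% stimulation
#     presented = pwm_range - requested    # 0 is sent to the HP LED driver
#     saved = pwm_range - presented        # 100 is written to disk again
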
def show_vr_arena_update_error(recording_framerate,
                               vr_update_rate):
    """
    This function warns the user that an incompatible frame
    rate/dynamic arena update frequency has been chosen.
    For example, if the frame rate is 30 frames per second and
    the update rate is 10Hz, the arena will be updated every 3rd
    frame (30/10=3). This is of course possible.
    If the frame rate is 30 frames per second and the update rate is
    set to 20Hz, the arena should be updated every 1.5th frame
    (30/20=1.5). This is not possible. What will happen instead is
    that the arena is sometimes updated after one frame and sometimes
    after two frames. The mean is 1.5 frames per update, but the
    updates are not regular, of course.
    As this can easily lead to bad data being produced without the
    user knowing (no explicit error will be thrown), this function
    informs the user of the mistake so that they can, for example,
    change the frame rate to 40 frames per second to keep the 20Hz
    update rate, or change the update rate instead.
    """
top = tk.Toplevel()
    tk.Label(top, text='You have chosen a frame rate of ' +
                       repr(recording_framerate) +
                       ' and a dynamic arena update rate of ' +
                       repr(vr_update_rate) +
                       '.\nThis would lead to an update every ' +
                       repr(round(recording_framerate / vr_update_rate, 2)) +
                       ' frames, which cannot be delivered!\n'
                       'Please choose a frame rate that is '
                       'compatible with the update rate or vice versa.'
             ).pack()
tk.Button(top, text="OK", command=top.destroy).pack(pady=5)
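
# A minimal sketch of the compatibility check that would trigger this
# warning (an assumption about how callers use it, not code from this
# module): the frame rate must be an integer multiple of the update rate.
#
#     frames_per_update = recording_framerate / vr_update_rate
#     if frames_per_update != int(frames_per_update):
#         show_vr_arena_update_error(recording_framerate, vr_update_rate)
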
def undistort_points(point_col, point_row, undistort_mtx, undistort_dst, newcameramtx):
    """
    Uses the OpenCV (cv2) library to undistort a single col/row point.
    """
    # The input needs to come in this particular format: cv2 expects
    # an array of (x, y), i.e. (col, row), points
    point_of_interest = np.zeros((1, 1, 2), dtype=np.float32)
    point_of_interest[0, 0, :] = point_col, point_row
    # Now correct the point position
    corrected_points = cv2.undistortPoints(point_of_interest,
                                           undistort_mtx,
                                           undistort_dst,
                                           None,
                                           newcameramtx)
    # cv2 returns points in the same (x, y), i.e. (col, row), order
    cor_point_col = corrected_points[0][0][0]
    cor_point_row = corrected_points[0][0][1]
    return cor_point_col, cor_point_row
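
# Usage sketch (illustrative only: the matrices would typically come
# from a cv2.calibrateCamera() calibration, e.g. the arrays stored in
# the 'undistort_matrices.npz' file saved by Save.save_n_plot):
#
#     col, row = undistort_points(320.0, 240.0,
#                                 undistort_mtx, undistort_dst,
#                                 newcameramtx)
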