Source code for start_GUI

__author__ = 'David Tadres'
__project__ = 'PiVR'
__version__ = '1.7.10'
__date__ = '1st of December, 2023'

#####################################################
# This seems to be necessary in order to run on MacOS
import matplotlib
matplotlib.use('TkAgg')
#####################################################

# general imports
import json
import os
import time
import tkinter as tk
from tkinter import font as tkfont
from tkinter import filedialog, messagebox, simpledialog
from glob import glob
import numpy as np
import pandas as pd
from PIL import Image, ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.patches import Circle
import calendar
import imageio
import subprocess
from pathlib import Path

# Wrap the undistort functionality to not break older installations
# when they update.
try:
    import cv2
    CV2_INSTALLED = True
except ModuleNotFoundError:
    CV2_INSTALLED = False

# local modules
import control_file
import distance_configuration_module
import VR_drawing_board
import analysis_scripts
import output_channels
import visualize_tracked_experiment
import initialize_image_data
import image_data_handling
# testing purpose
import record_videos_and_image_sequences
import multi_animal_tracking

# SOME CONSTANTS:

# Depending on the sampling frequency (chosen in the terminal when
# starting the pigpio daemon) you need to comment/uncomment the
# matching variable below.
MAX_PWM_FREQ = 40000 # sampling freq = 1us
# MAX_PWM_FREQ = 20000 # sampling freq = 2us
# MAX_PWM_FREQ = 10000 # sampling freq = 4us
# MAX_PWM_FREQ = 8000 # sampling freq = 5us
# MAX_PWM_FREQ = 5000 # sampling freq = 8us
# MAX_PWM_FREQ = 4000 # sampling freq = 10us

# The numbers between 0 and the value defined here are used to
# define the duty cycle of the GPIOs. The maximum allowed value for
# PWM_RANGE is 40000:
# http://abyz.me.uk/rpi/pigpio/python.html#set_PWM_range
PWM_RANGE = 100
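
# A minimal illustrative sketch (not executed here) of how PWM_RANGE and
# MAX_PWM_FREQ are meant to interact with pigpio's API, assuming the pigpio
# daemon is running and GPIO 17/18 are wired as in a standard PiVR setup:
#
#     import pigpio
#     pi = pigpio.pi()                        # connect to the local pigpio daemon
#     pi.set_PWM_frequency(17, MAX_PWM_FREQ)  # software PWM frequency in Hz
#     pi.set_PWM_range(17, PWM_RANGE)         # duty cycle values now run from 0..100
#     pi.set_PWM_dutycycle(17, 50)            # 50 out of PWM_RANGE=100 -> 50% duty cycle
#     pi.hardware_PWM(18, 40000, 500000)      # hardware PWM: duty cycle scale is 0..1,000,000
#     pi.stop()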

# Here is a switch for simulating the Raspberry Pi experience on
# another computer
virtual_raspberry = False

# This try-except statement checks whether the processor is an ARM
# processor (as used by the Raspberry Pi) or not.
# Since os.uname() only exists on Unix-like systems, the call is
# wrapped in try-except; otherwise it would throw an AttributeError
# on a Windows system.
try:
    if os.uname()[4][:3] == 'arm':
        # This will yield True for both a Raspberry and for M1 Chip
        # Apple devices.
        # Use this code snippet
        # (from https://raspberrypi.stackexchange.com/questions/5100/detect-that-a-python-program-is-running-on-the-pi)
        import re
        CPUINFO_PATH = Path("/proc/cpuinfo")
        if CPUINFO_PATH.exists():
            with open(CPUINFO_PATH) as f:
                cpuinfo = f.read()
            if re.search(r"^Model\s*:\s*Raspberry Pi", cpuinfo, flags=re.M) is not None:
                # if True, is Raspberry Pi
                RASPBERRY = True
            else:
                # /proc/cpuinfo exists but does not report a
                # Raspberry Pi model (some other ARM Linux device)
                RASPBERRY = False
            LINUX = True
        else:
            # No /proc/cpuinfo, e.g. an Apple M1 chip (or other ARM
            # CPU device).
            RASPBERRY = False
            LINUX = True
    else:
        # is either Mac or Linux
        RASPBERRY = False
        LINUX = True

    DIRECTORY_INDICATOR = '/'
except AttributeError:
    # is Windows
    RASPBERRY = False
    LINUX = False
    DIRECTORY_INDICATOR = '\\'

# Keep a reference to the path of the PiVR software.
SOFTWARE_PATH = os.path.abspath(os.getcwd())

if RASPBERRY:
    import picamera
    import pigpio

# The snippet below will try to read a file called
# 'saved_variables.json' in the same folder as this script. If it
# finds it, it will restore all the settings the user had when the
# program was last closed.
previous_variables = None
try:
    if LINUX:
        with open(('/'.join(os.path.realpath(__file__).split('/')[:-1])
                   + '/saved_variables.json'), 'r') as file:
            previous_variables = json.load(file)
    else:
        with open(('\\'.join(os.path.realpath(__file__).split('\\')[:-1])
                   + '\\saved_variables.json'), 'r') as file:
            previous_variables = json.load(file)
except FileNotFoundError:
    pass
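
# For reference, a platform-independent sketch of the same lookup using
# pathlib (Path is imported above); kept as a comment so the loading logic
# above stays exactly as written:
#
#     settings_file = Path(__file__).resolve().parent / 'saved_variables.json'
#     if settings_file.exists():
#         with open(settings_file, 'r') as file:
#             previous_variables = json.load(file)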

# In addition, the software looks for a second json file:
# "list_of_available_organisms.json". This file contains parameters
# used by PiVR to adapt the tracking algorithm to specific animals.
try:

    if LINUX:
        with open(('/'.join(os.path.realpath(__file__).split('/')[:-1])
                   + '/list_of_available_organisms.json'),
                  'r') as file:
            organisms_and_heuristics = json.load(file)
    else:
        with open(('\\'.join(os.path.realpath(__file__).split('\\')[:-1])
                   + '\\list_of_available_organisms.json'),
                  'r') as file:
            organisms_and_heuristics = json.load(file)

except FileNotFoundError:
    # without this file we can't really do anything - so quit the
    # program and ask the user to fix the problem
    tk.messagebox.showerror('File Missing',
            'Cannot find list_of_available_organisms.json in the '
            'main program folder.'
            '\nPlease restore the file or get a new copy of the program')
    import sys

    sys.exit()

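# organisms_and_heuristics maps an organism name to the heuristic tracking
# parameters PiVR uses to detect that animal. An illustrative (hypothetical)
# lookup, assuming the shipped json contains an entry named '3rd instar Dmel'
# with keys such as 'filled_area_min_mm' and 'filled_area_max_mm':
#
#     params = organisms_and_heuristics['3rd instar Dmel']
#     min_area = params['filled_area_min_mm']
#     max_area = params['filled_area_max_mm']
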
#######################################################################
# Grab current git branch and git hash to save in
# experiment_settings.json
try:
    if RASPBERRY:
        CURRENT_GIT_BRANCH = subprocess.check_output(["git", "symbolic-ref", "--short", "HEAD"]).strip().decode()
    else:
        CURRENT_GIT_BRANCH = subprocess.check_output(["git", "branch", "--show-current"]).strip().decode()

    CURRENT_GIT_HASH = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode()
except Exception:
    # This is necessary as e.g. readthedocs (or any copy of the code
    # without a git repository) throws an error here.
    CURRENT_GIT_BRANCH = 'Not defined'
    CURRENT_GIT_HASH = 'Not defined'
# Create one string containing all the information
VERSION_INFO = 'v' + __version__ + \
               ', git branch: ' + CURRENT_GIT_BRANCH + \
               ', git hash: ' + CURRENT_GIT_HASH
#######################################################################

class Camera():
    def __init__(self):
        """
        This class initializes the Raspberry Pi camera.

        This is done once, while the PiVR class is preparing the
        GUI, if the software is run on a Raspberry Pi.
        """

        if RASPBERRY:

            # Here we initialize the camera
            self.cam = picamera.PiCamera()
            # turn off the small signal LED on the PiCamera as it
            # might be seen by the animal
            self.cam.led = False
            # At least for the RPi it seems the white balance needs to be set manually
            self.cam.awb_mode = 'tungsten'
            # Exposure mode, explanation here:
            # http://picamera.readthedocs.io/en/release-1.10/api_camera.html
            if previous_variables is not None:
                self.cam.shutter_speed = \
                    previous_variables['exposure time']
                # if exposure time is anything but 0 it has been set
                # while autoexposure was turned off, i.e. because the
                # user wanted to slightly overexpose the image. In
                # that case turn autoexposure off from the start
                if self.cam.shutter_speed != 0:
                    self.cam.exposure_mode = 'off'
                else:
                    # if the exposure time was 0 it means that the
                    # autoexposure mode was on when saving the
                    # variables. Reflect that by turning
                    # autoexposure on
                    self.cam.exposure_mode = 'auto'

            print('initialized')
        else:
            self.cam = None

[docs] class PiVR(tk.Tk): """ This class initializes the GUI the user will see. There are several different frames (e.g. "Tracking" vs "Virtual Arena") that are all created differently. To do this, the "PiVR" class calls a number of other classes. To help with this the following three "helper" classes are important: #) "CommonVariables" contains variables that are true between frames, #) "SubFrames" helps with the creation of the different frames and finally #) "CommonFunction" which contains functions that are called in different frames. The actual frames (e.g. "TrackingFrame") are then created by "constructor" classes which call different components of the three classes described above. The "helper" classes are necessary as they can save variables and functions between different frames (similar to global variables). For example, if the user would select at particular folder to save all the experimental data, the "CommonVariables" class saves this folder when the user is then switching from, for example, the "Tracking" frame to the "Virtual Arena" frame. """ def __init__(self, *args, **kwargs): """ """ tk.Tk.__init__(self, *args, **kwargs) self.title_font = tkfont.Font(family='Helvetica', size=18) if LINUX: try: self.wm_iconbitmap(bitmap = '@pics/VRLarvaBanana32px.xbm') except: # it's a tkinter.TclError when opening on the Pi # with the shortcut pass else: self.wm_iconbitmap('pics\\VRLarvaBanana32px.ico') if not CV2_INSTALLED: self.wm_title('PiVR ' + __version__ + ', noCV2') else: self.wm_title('PiVR ' + __version__) #self.wm_title('PiVR ' + __version__) # the container is where we'll stack a bunch of frames on top # of each other, then the one we want visible will be raised # above the others container = tk.Frame(self) container.grid(row=0, column=0) container.grid_rowconfigure(0, weight=1) container.grid_columnconfigure(0, weight=1) # Initialize the camera instance - this is the only time this # happens! # If error, e.g. camera not connected/not turned on the # script fails here. So if we want to give a more meaningful # error message, this would be the place to raise tkinter # exception and let the user know what to do! camera_class_pointer = Camera() # Call the common classes first - every class that wants to # use any of those variables will get to work with this # variable which has the whole class of variables used by the # GUI. # After initalization, these can be called from inside other # classes. self.all_common_variables = \ CommonVariables(camera_class=camera_class_pointer) self.all_common_functions = \ CommonFunctions(camera_class=camera_class_pointer, controller=self) self.sub_frames = \ SubFrames(camera_class=camera_class_pointer, controller=self) # figure out where the script itself is. if LINUX: # todo - test with a mac! self.path_of_program = \ (os.path.realpath(__file__)[0:-len( os.path.realpath(__file__).split('/')[-1])]) #print(self.path_of_program) else: # Assume it's Windows self.path_of_program = \ (os.path.realpath(__file__)[0:-len( os.path.realpath(__file__).split('\\')[-1])]) print(self.path_of_program) #print(pathlib.Path.cwd()) # Todo - for readability can # try to change to a pathlib.Path - but need to change a # lot of strings that use self.path_of_program # Now create an empty dictionary. 
This dictionary will be # used to address the newly created tkinter frames self.frames = {} # This loop takes the list of the "constructor" classes if RASPBERRY or virtual_raspberry: for F in (TrackingFrame, VirtualRealityFrame, DynamicVirtualRealityFrame, VideoFrame, FullFrameImagesRecording, TimelapseRecording, #FixMetadata, SimulateOnlineExperiment,): # previously included: , TryFastCam, FastDownscaleSave, # AnalyzeVideoExperiment # This will save the name of the class (e.g. # TrackingFrame) so that it can be used as a # dictionary key later on page_name = F.__name__ # Now construct the different frames using the # constructor classes. All frames are constructed in # the "container" tk.Frame(). frame = F(parent=container, controller=self, camera_class=camera_class_pointer) # Here the "frames" dictionary is filled with the # different frame instances self.frames[page_name] = frame # put all of the pages in the same location; # the one on the top of the stacking order # will be the one that is visible. frame.grid(row=0, column=0, sticky="nsew") # In case the software is started not on the PiVR (and the # virtual_raspberry boolean is turned to False) a slightly # different frame dictionary is created. elif not virtual_raspberry: for F in (TrackedAnalysisFrame, ImageDataHandling, #FixMetadata, DisplayTrackedImage, MultiAnimalTracking, SimulateOnlineExperiment, ): # This will save the name of the class (e.g. # TrackingFrame) so that it can be used as a # dictionary key later on page_name = F.__name__ # Now construct the different frames using the # constructor classes. All frames are constructed in # the "container" tk.Frame(). frame = F(parent=container, controller=self, camera_class=camera_class_pointer) # Here the "frames" dictionary is filled with the # different frame instances self.frames[page_name] = frame # put all of the pages in the same location; # the one on the top of the stacking order # will be the one that is visible. frame.grid(row=0, column=0, sticky="nsew") # After constructing all the individual frames, an immutable # menubar on top of the window is created self.menubar = tk.Menu(self) # The creation of the menubar goes from left to right # We start with the "File" menu that enables the user to do # globally valid actions, such as closing the window or # updating the software! menu_file = tk.Menu(self.menubar, tearoff=0) self.menubar.add_cascade(label="File", menu=menu_file) # To this Menu-object, a number of different options # attached to functions are created: menu_file.add_command( label="Save settings", command=lambda: self.all_common_functions.quit_sequence( save=True, exit=False)) menu_file.add_command( label="Save and exit", command=lambda: self.all_common_functions.quit_sequence( save=True, exit=True)) menu_file.add_command( label='Discard changes and exit', command=lambda: self.all_common_functions.quit_sequence( save=False, exit=True)) menu_file.add_command( label='About/Version', command=self.all_common_functions.software_info) menu_file.add_command( label='Update software', command=self.all_common_functions.update_software) # In the "Recording" menu, the user can choose between the # different recording options! 
menu_recording = tk.Menu(self.menubar, tearoff=0) self.menubar.add_cascade(label="Recording", menu=menu_recording) menu_recording.add_command( label="Tracking", command=lambda: self.show_frame('TrackingFrame')) menu_recording.add_command( label="VR Arena", command=lambda: self.show_frame('VirtualRealityFrame')) menu_recording.add_command( label="Dynamic VR Arena", command=lambda: self.show_frame( 'DynamicVirtualRealityFrame')) menu_recording.add_command( label='Full Frame Recording', command=lambda: self.show_frame('FullFrameImagesRecording')) menu_recording.add_command( label='Timelapse Recording', command=lambda: self.show_frame('TimelapseRecording') ) menu_recording.add_command( label="Video", command=lambda: self.show_frame('VideoFrame')) # In the "Analysis" menu, the user can choose different # tools option menu_tools = tk.Menu(self.menubar, tearoff=0) self.menubar.add_cascade(label="Tools", menu=menu_tools) menu_tools.add_command( label='Analysis', command=lambda: self.show_frame('TrackedAnalysisFrame')) menu_tools.add_command( label='Image data handling', command=lambda: self.show_frame('ImageDataHandling') ) #menu_tools.add_command( # label='Fix Metadata', # command=lambda: self.show_frame('FixMetadata')) menu_tools.add_command( label='Display tracked experiment', command=lambda: self.show_frame('DisplayTrackedImage')) menu_tools.add_command( label='Multi-Animal Tracking', command=lambda: self.show_frame('MultiAnimalTracking')) menu_tools.add_command( label='Draw VR arena', command=self.all_common_functions.draw_VR_arena) menu_tools.add_command( label='Undistort, new lens', command=self.all_common_functions.undistort_new_lens_menu) # Here goes the "Debug" menu - currently the only option is # to simulate an online experiment menu_simulate = tk.Menu(self.menubar, tearoff=0) self.menubar.add_cascade(label='Debug', menu=menu_simulate) menu_simulate.add_command( label='Simulate Online Tracking', command=lambda : self.show_frame('SimulateOnlineExperiment')) #menu.add_command(label='Analyze Full Frame Image Experiment', # command=lambda: self.show_frame('AnalyzeFullFrameImageExperiment')) #menu.add_command(label='Analyze Video Experiment', # command=lambda: self.show_frame('AnalyzeVideoExperiment')) # Todo - decide if delete or keep. Includes the classes and python files! #menu = tk.Menu(self.menubar, tearoff=0) #self.menubar.add_cascade(label='Development', menu=menu) #menu.add_command(label='fast vid', command=lambda: self.show_frame('TryFastCam')) #menu.add_command(label='fast downscale', command=lambda: self.show_frame('FastDownscaleSave')) # Finally, the "Options' Menu is defined. Quite a few # essential options are in here such as menu_options = tk.Menu(self.menubar, tearoff=0) self.menubar.add_cascade(label='Options', menu=menu_options) # Add a menu to select organism using a dropdown menu which # allows for maximum flexibility as the end user can and # should change the list of organisms! 
organism_menu = tk.Menu(self, tearoff=0) menu_options.add_cascade(label='Select Organism', menu=organism_menu) # As the content of the menu option are dependent on what was # read in "available_organisms.json", use a loop to populate it for organism_names in organisms_and_heuristics: organism_menu.add_radiobutton( label=organism_names, value=organism_names, variable=self.all_common_variables.model_organism_variable) # add radiobutton to turn debug mode on or off debug_menu = tk.Menu(self, tearoff=0) menu_options.add_cascade( label='Turn Debug Mode...', menu = debug_menu) debug_options = ['OFF', 'ON'] for debug_option_names in debug_options: debug_menu.add_radiobutton( label=debug_option_names, value=debug_option_names, variable=self.all_common_variables.debug_mode_var) # The following menu items call functions found in the # all_common_functions class menu_options.add_command( label='Define Pixel/mm', command=self.all_common_functions.distance_configuration_func) menu_options.add_command( label='Optimize Image', command=self.all_common_functions.camera_controls_func) menu_options.add_command( label='Select Output Channels', command=self.all_common_functions.select_output_channels) menu_options.add_command( label='High Power LEDs', command=self.all_common_functions.high_power_LED) menu_options.add_command( label='Animal Detection Method', command=self.all_common_functions.animal_detection_method_func) menu_options.add_command( label='VR Stimulation Point', command=self.all_common_functions.vr_body_part_stim_func) menu_options.add_command( label='Animal Color', command=self.all_common_functions.select_signal) menu_options.add_command( label='Output Files', command=self.all_common_functions.output_files_func) menu_options.add_command( label='Undistort Options', command=self.all_common_functions.undistort_online_func) # And finally, this menu allows the user to design their own # virtual arena using the drawing board. #menu_VR_options = tk.Menu(self.menubar, tearoff=0) #self.menubar.add_cascade(label='VR', menu=menu_VR_options) #menu_VR_options.add_command( # label='Draw VR arena', # command=self.all_common_functions.draw_VR_arena) top = self.winfo_toplevel() top.config(menu=self.menubar) # Here, a number of the just created menu options are # disabled depending on the platform the PiVR software is # being used. 
if RASPBERRY or virtual_raspberry: menu_tools.entryconfig( "Analysis", state="disabled") menu_tools.entryconfig( "Display tracked experiment", state="disabled") #menu_tools.entryconfig( # "Post-Hoc Single Animal Analysis", state="disabled") menu_tools.entryconfig( "Multi-Animal Tracking", state="disabled") menu_tools.entryconfig( "Draw VR arena", state="disabled") menu_tools.entryconfig( "Image data handling", state="disabled") #menu_tools.entryconfig( # 'Undistort, new lens', state='disabled') menu_simulate.entryconfig( "Simulate Online Tracking", state="disabled") menu_options.entryconfig( "Turn Debug Mode...", state="disabled") elif not virtual_raspberry: #menu_file.entryconfig( # "Update software", state="disabled") menu_recording.entryconfig( "Tracking", state="disabled") menu_recording.entryconfig( "VR Arena", state="disabled") menu_recording.entryconfig( "Dynamic VR Arena", state="disabled") menu_recording.entryconfig( "Full Frame Recording", state="disabled") menu_recording.entryconfig( "Video", state="disabled") menu_options.entryconfig( "Select Organism", state="disabled") menu_options.entryconfig( "Define Pixel/mm", state="disabled") menu_options.entryconfig( "Optimize Image", state="disabled") menu_options.entryconfig( "Select Output Channels", state="disabled") menu_options.entryconfig( "High Power LEDs", state="disabled") #menu_options.entryconfig( # "Animal Color", state="disabled") #menu_options.entryconfig( # "Output Files", state="disabled") if not CV2_INSTALLED: menu_options.entryconfig( "Undistort Options", state="disabled") if not CV2_INSTALLED: menu_tools.entryconfig( 'Undistort, new lens', state='disabled') if previous_variables is not None and 'opencv_information_box' in previous_variables: self.all_common_variables.opencv_information_box = \ previous_variables['opencv_information_box'] else: self.all_common_variables.opencv_information_box = True if self.all_common_variables.opencv_information_box: self.all_common_variables.opencv_information_box = \ tk.messagebox.askyesno(title='OpenCV not installed', message='With v1.7.0 released in October 2021 ' 'PiVR can use OpenCV to undistort ' 'the image during tracking.\n' 'To take advantage of this feature ' 'please install opencv.\n' 'The easiest way to install opencv is ' 'to wipe the SD ' 'card and re-install everything, ' 'including the OS from scratch.\n' 'For more information, please visit ' 'www.pivr.org and go to "Advanced Topics"' '--> "Create your own undistort files".\n\n' 'Please press "YES" if you want to see ' 'this message again.\n' 'Please press "NO" if you do not want ' 'to see this message anymore' ) # Whenever the software starts, display the last shown frame if previous_variables is not None and 'Window' in previous_variables: self.show_window = previous_variables['Window'] else: if RASPBERRY or virtual_raspberry: self.show_window ='TrackingFrame' else: self.show_window = 'TrackedAnalysisFrame' try: self.show_frame(self.show_window) except KeyError: # # this happens when the user opens the software for the # first time on a PC (non-virtual RPi) if RASPBERRY or virtual_raspberry: self.show_window = 'TrackingFrame' else: self.show_window ='TrackedAnalysisFrame' self.show_frame(self.show_window) # Here, the term protocol refers to the interaction between # the application and the window manager. The most commonly # used protocol is called WM_DELETE_WINDOW, and is used to # define what happens when the user explicitly closes a # window using the window manager. 
        # (http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm#protocols)
        def on_closing():
            if tk.messagebox.askokcancel("Quit without "
                                         "\n saving",
                                         "Do you want to quit "
                                         "\nwithout saving your "
                                         "settings?"):
                self.destroy()
                # root.destroy()

        self.protocol("WM_DELETE_WINDOW", on_closing)
    def show_frame(self, page_name):
        """
        This function is called when the user selects a different
        frame. It takes the selected frame and raises it to the top.
        In addition, it saves the current page name, which is needed
        as a reference to the currently active frame when calling
        functions that are shared between frames, such as starting
        an experiment!
        """
        frame = self.frames[page_name]
        frame.tkraise()
        # also save which window we currently have up - needed for
        # referencing
        self.current_window = page_name
        print(self.current_window)
        # Finally, for user friendliness - if the user has been
        # presenting a virtual reality it is shown in the preview
        # window.
        # If the user now goes to, e.g., the 'Tracking' tab, this is
        # not intuitive as the arena would not be presented there.
        # Therefore check here if the current "page_name" is
        # "VirtualRealityFrame" - if yes, check if an arena has been
        # chosen and present it in the preview window.
        # If the current page_name is NOT "VirtualRealityFrame",
        # remove any preview.
        if page_name == "VirtualRealityFrame":
            # check if arena has been selected
            if self.all_common_variables.overlay is not None:
                self.all_common_functions.preview_overlay_func()
        else:
            # Remove any overlay on the preview window!
            if self.all_common_variables.overlay_image is not None:
                try:
                    self.all_common_variables.cam.remove_overlay(
                        self.all_common_variables.overlay)
                except Exception as ex:
                    # This happens when selecting a VR arena,
                    # then selecting e.g. Video and then selecting
                    # tracking or some other window that is not
                    # VirtualRealityFrame.
                    #print('Tried removing overlay after removing it '
                    #      'already:')
                    #print(ex)
                    pass
    def call_start_experiment_function(self, page_name):
        """
        This function will be called by the button that says
        'start experiment'. It will look in the currently active
        frame for a function called 'start_experiment_function'.
        """
        self.frames[page_name].start_experiment_function()
    def access_subframes(self, page_name):
        """
        This function returns the instance of the currently active
        (= in foreground) window.
        """
        return (self.frames[page_name])
class CommonVariables(): """ Here are all the variables stored that are being used in the GUI. This class is called by the controller (SmallVR) and referenced. All subsequent calls to any of these variables goes through the controller. I hope this makes the code easier to understand on a high level and also possible to add new windows without fully understanding all the other windows. """ def __init__(self, camera_class): self.camera_class = camera_class self.cam = self.camera_class.cam # previously was in 'if not RASPBERRY' - might lead to a bug out here self.image_path = tk.StringVar() self.image_path.set('No path selected yet') # todo check in previous variables? Maybe? if not RASPBERRY: self.cam = None if previous_variables is not None \ and 'Animal Signal' in previous_variables: self.signal=previous_variables['Animal Signal'] else: self.signal = 'dark' # need to define early (before setting output variables) if the setup used is using high-powered LEDs or strips self.high_power_LEDs_bool = tk.IntVar() if previous_variables is not None: try: self.high_power_LEDs_bool.set(previous_variables['High Power LEDs']) except KeyError: self.high_power_LEDs_bool.set(0) else: self.high_power_LEDs_bool.set(0) # if RASPBERRY or virtual_raspberry: # I like to give this output a very different name than all the other outputs as this one is only one of two # GPIO pins on the Raspberry Pi that is capable of true hardware PWM: 13 and 18 (the latter not tested by me) self.backlight_intensity_variable = tk.IntVar() if previous_variables != None and 'Background dutycycle' in previous_variables: self.backlight_intensity_value = previous_variables['Background dutycycle'] else: self.backlight_intensity_value = 0 self.backlight_intensity_variable.set(self.backlight_intensity_value) # Another backlight channel. This can be needed if illumination is done via IR on the backlight #1 but one also # needs a constant color, e.g. white or blue LEDs, to be on the whole experiment. self.backlight_two_intensity_variable = tk.IntVar() if previous_variables != None and 'Background 2 dutycycle' in previous_variables: self.backlight_two_intensity_value = previous_variables['Background 2 dutycycle'] else: self.backlight_two_intensity_value = 0 self.backlight_two_intensity_variable.set(self.backlight_two_intensity_value) # Goal here is to give the user relativly free reign over how to use the GPIOs. In total we have four channels # (including the background channel above). It is, however, possible to use the four channels independently # if no PWM is needed for the background illumination. These four channels are defined below. 
self.channel_one_variable = tk.IntVar() if previous_variables != None: try: self.channel_one_dutycycle = previous_variables['Channel 1 dutycycle'] except: self.channel_one_dutycycle = 0 else: self.channel_one_dutycycle = 0 # Channel 2 self.channel_two_variable = tk.IntVar() if previous_variables != None: try: self.channel_two_dutycycle = previous_variables['Channel 2 dutycycle'] except: self.channel_two_dutycycle = 0 else: self.channel_two_dutycycle = 0 self.channel_three_variable = tk.IntVar() if previous_variables != None: try: self.channel_three_dutycycle = previous_variables['Channel 3 dutycycle'] except: self.channel_three_dutycycle = 0 else: self.channel_three_dutycycle = 0 self.channel_four_variable = tk.IntVar() if previous_variables != None: try: self.channel_four_dutycycle = previous_variables['Channel 4 dutycycle'] except: self.channel_four_dutycycle = 0 else: self.channel_four_dutycycle = 0 # Try to assign the channel variables (including frequencies) from the previous variables json files if previous_variables != None: try: self.background = previous_variables['Background'] self.background_two = previous_variables['Background 2'] self.channel_one = previous_variables['Channel 1'] self.channel_two = previous_variables['Channel 2'] self.channel_three = previous_variables['Channel 3'] self.channel_four = previous_variables['Channel 4'] except: self.background = [[18, 40000]] self.background_two = [] self.channel_one = [[17, MAX_PWM_FREQ], [27, MAX_PWM_FREQ], [13, MAX_PWM_FREQ]] self.channel_two = [] self.channel_three = [] self.channel_four = [] else: self.background = [[18, 40000]] self.background_two = [] self.channel_one = [[17, MAX_PWM_FREQ], [27, MAX_PWM_FREQ], [13, MAX_PWM_FREQ]] self.channel_two = [] self.channel_three = [] self.channel_four = [] if RASPBERRY: # accesses the local Pi's GPIO and binds it to the variable pwm_object. In order to access the GPIOs we # can just call the variable self.pwm_object = pigpio.pi() # if not hardware PWM the range of values will be 0-40000 (standard would be 0-255) self.pwm_range = PWM_RANGE # for each of the channels, look which gpios are assigned and set the frequency at this point for i in range(len(self.background)): if self.background[i][1] > MAX_PWM_FREQ: self.pwm_object.hardware_PWM(self.background[i][0], self.background[i][1], self.backlight_intensity_value) else: # this should almost never be used - can't really think of a good use of this! self.pwm_object.set_PWM_frequency(self.background[i][0], self.background[i][1]) self.pwm_object.set_PWM_range(self.background[i][0], self.pwm_range) self.pwm_object.set_PWM_dutycycle(self.background[i][0], int(round(self.backlight_intensity_value*(self.pwm_range/1000000)))) # for backlight_two: for i in range(len(self.background_two)): if self.background_two[i][1] > MAX_PWM_FREQ: self.pwm_object.hardware_PWM(self.background_two[i][0], self.background_two[i][1], self.backlight_intensity_value) else: # this should almost never be used - can't really think of a good use of this! 
self.pwm_object.set_PWM_frequency(self.background_two[i][0], self.background_two[i][1]) self.pwm_object.set_PWM_range(self.background_two[i][0], self.pwm_range) self.pwm_object.set_PWM_dutycycle(self.background_two[i][0], int(round(self.backlight_two_intensity_value*(self.pwm_range/1000000)))) for i in range(len(self.channel_one)): if self.channel_one[i][1] > MAX_PWM_FREQ: self.pwm_object.hardware_PWM(self.channel_one[i][0], self.channel_one[i][1], self.channel_one_dutycycle) else: # this should almost never be used - can't really think of a good use of this! self.pwm_object.set_PWM_frequency(self.channel_one[i][0], self.channel_one[i][1]) self.pwm_object.set_PWM_range(self.channel_one[i][0], self.pwm_range) # if high power LED bool is True invert the scale - zero will then be 40000 which is off and # vice versa if self.high_power_LEDs_bool.get(): self.pwm_object.set_PWM_dutycycle(self.channel_one[i][0], int(self.pwm_range - self.channel_one_dutycycle)) else: self.pwm_object.set_PWM_dutycycle(self.channel_one[i][0], self.channel_one_dutycycle) for i in range(len(self.channel_two)): if self.channel_two[i][1] > MAX_PWM_FREQ: self.pwm_object.hardware_PWM(self.channel_two[i][0], self.channel_two[i][1], self.channel_two_dutycycle) else: # this should almost never be used - can't really think of a good use of this! self.pwm_object.set_PWM_frequency(self.channel_two[i][0], self.channel_two[i][1]) self.pwm_object.set_PWM_range(self.channel_two[i][0], self.pwm_range) if self.high_power_LEDs_bool.get(): # if high power LED bool is True invert the scale - zero will then be 40000 which is off and # vice versa self.pwm_object.set_PWM_dutycycle(self.channel_two[i][0], int(self.pwm_range - self.channel_two_dutycycle)) else: self.pwm_object.set_PWM_dutycycle(self.channel_two[i][0], self.channel_two_dutycycle) for i in range(len(self.channel_three)): if self.channel_three[i][1] > MAX_PWM_FREQ: self.pwm_object.hardware_PWM(self.channel_three[i][0], self.channel_three[i][1], self.channel_three_dutycycle) else: # this should almost never be used - can't really think of a good use of this! self.pwm_object.set_PWM_frequency(self.channel_three[i][0], self.channel_three[i][1]) self.pwm_object.set_PWM_range(self.channel_three[i][0], self.pwm_range) if self.high_power_LEDs_bool.get(): # if high power LED bool is True invert the scale - zero will then be 40000 which is off and # vice versa self.pwm_object.set_PWM_dutycycle(self.channel_three[i][0], int(self.pwm_range - self.channel_three_dutycycle)) else: self.pwm_object.set_PWM_dutycycle(self.channel_three[i][0], self.channel_three_dutycycle) for i in range(len(self.channel_four)): if self.channel_four[i][1] > MAX_PWM_FREQ: self.pwm_object.hardware_PWM(self.channel_four[i][0], self.channel_four[i][1], self.channel_four_dutycycle) else: # this should almost never be used - can't really think of a good use of this! 
self.pwm_object.set_PWM_frequency(self.channel_four[i][0], self.channel_four[i][1]) self.pwm_object.set_PWM_range(self.channel_four[i][0], self.pwm_range) if self.high_power_LEDs_bool.get(): # if high power LED bool is True invert the scale - zero will then be 40000 which is off and # vice versa self.pwm_object.set_PWM_dutycycle(self.channel_four[i][0], int(self.pwm_range - self.channel_four_dutycycle)) else: self.pwm_object.set_PWM_dutycycle(self.channel_four[i][0], self.channel_four_dutycycle) #print(self.pwm_object.get_PWM_frequency(STIM_GPIO_1)) # In the next line the variable that will be bound to the menu where the user can choose the resolution # is created. self.resolution_variable = tk.StringVar() self.available_resolution = ('640x480', '1024x768', '1296x972', '1920x1080', #'2592x1944' This throws a 'picamera.exc.PiCameraMMALError: Failed to # enable component: Out of resources' error. # Most likely due to GPU memory: # https://picamera.readthedocs.io/en/release-1.4/fov.html ) # Test if the file that saved the previous settings exists if previous_variables is not None: try: # check if the user used the 640x480 resolution before if previous_variables['Resolution'] == '640x480': # if yes, set the variable to that resolution - this will make sure that the menu option self.resolution_variable.set(self.available_resolution[0]) self.resolution = '640x480' # todo - delete if below in final version if RASPBERRY: # Here the resolution of the camera is changed self.cam.resolution = (640, 480) # check if the user used the 1024x768 resolution before elif previous_variables['Resolution'] == '1024x768': self.resolution_variable.set(self.available_resolution[1]) # todo - understand why we have resolution, resolution_variable and cam.resolution on the Pi self.resolution = '1024x768' # todo - delete if below in final version if RASPBERRY: self.cam.resolution = (1024, 768) elif previous_variables['Resolution'] == '1296x972': self.resolution_variable.set(self.available_resolution[2]) self.resolution = '1296x972' # todo - delete if below in final version if RASPBERRY: self.cam.resolution = (1296, 972) elif previous_variables['Resolution'] == '1920x1080': self.resolution_variable.set(self.available_resolution[3]) self.resolution = '1920x1080' # todo - delete if below in final version if RASPBERRY: self.cam.resolution = (1920, 1080) elif previous_variables['resolution'] == '2592x1944': self.resolution_variable.set(self.available_resolution[4]) self.resolution = '2592x1944' # todo - delete if below in final version if RASPBERRY: self.cam.resolution = (2592, 1944) except: # todo - no blank exceptions # if for some reason the above failed, just set the resolution to 640x480 self.resolution_variable.set(self.available_resolution[0]) self.resolution = '640x480' # todo - delete if below in final version if RASPBERRY: self.cam.resolution = (640, 480) else: self.resolution_variable.set(self.available_resolution[0]) self.resolution = '640x480' # todo - delete if below in final version if RASPBERRY: self.cam.resolution = (640, 480) # todo - delete if below in final version if RASPBERRY or virtual_raspberry: self.preview_window_size_variable = tk.StringVar() self.preview_window_size_value = 180 #self.camera_brightness_variable = tk.IntVar() #self.camera_brightness_value = 50 #self.camera_contrast_variable = tk.IntVar() #self.camera_contrast_value = 50 #self.camera_exposure_variable = tk.IntVar() #self.camera_exposure_value = 0 # organisms are defined in the 'list_of_available_organisms.json' file in the main 
folder of this software # An example entry of one organism: # "3rd instar Dmel" : { # "filled_area_min_mm" : 8, # "filled_area_max_mm" : 24, # "eccentricity_min" : 0.5, # "eccentricity_max" : 1.0, # "major_over_minor_axis_min": 1.25, # "major_over_minor_axis_max": 7, # "max_skeleton_length_mm": 4 # The string before the { is the name of the organism. After the { are several non-optional heuristic parameters # filled_area_min and max, eccentricity_min and max and major_over_minor_axis_min and max are necessary to # define the animal before the experiment starts # max_skeleton_length defines the size of the pre-allocated numpy array that will be used to save the images # the max_speed_animal_mm_per_s will be used to define the size of the search box self.model_organism_variable = tk.StringVar() self.model_organism_value = 'Not in list' # self.available_model_organisms = ('Not in list', 'Drosophila larva', 'D rerio 5dpf', 'Planaria') # add options to the optionmenu here # read dynamically which model organisms are saved in the json file self.available_model_organisms = [] # list comprehension (compact for loop) to add all the available animals [self.available_model_organisms.append(i) for i in organisms_and_heuristics] if previous_variables is not None and 'Organism' in previous_variables: for i in range(len(self.available_model_organisms)): if previous_variables['Organism'] == self.available_model_organisms[i]: self.model_organism_variable.set(self.available_model_organisms[i]) else: # default is 'not in list' self.model_organism_variable.set(self.available_model_organisms[0]) # Here the menu, that lets the user choose what to display during the experiment, is constructed self.experiment_observation_mode = tk.StringVar() # If more modes shall be added in the future, add them to this tuple. 
self.available_modes = ('only animal', 'overview', 'none', 'debug') if previous_variables != None: try: for experiment_observation_mode_loop in range(len(self.available_modes)): if previous_variables['Observation mode'] == self.available_modes[experiment_observation_mode_loop]: self.experiment_observation_mode.set(self.available_modes[experiment_observation_mode_loop]) self.experiment_observation_mode_output = self.available_modes[experiment_observation_mode_loop] except: self.experiment_observation_mode.set(self.available_modes[0]) self.experiment_observation_mode_output = self.available_modes[0] else: self.experiment_observation_mode.set(self.available_modes[0]) self.experiment_observation_mode_output = self.available_modes[0] # Changed to a menu button for the magnification of the observation during the experiment #self.observation_resize_variable = tk.IntVar() #self.observation_resize_options = (1, 2, 3, 4) #if previous_variables != None: # try: # for i in range(len(self.observation_resize_options)): # if previous_variables['Observation Resize'] == self.observation_resize_options[i]: # self.observation_resize_variable.set(self.observation_resize_options[i]) # else: # # if not in list, set default to '1' # self.observation_resize_variable.set(self.observation_resize_options[0]) # except: # # if for some reason not in previous_variables, fall back to default '1' # self.observation_resize_variable.set(self.observation_resize_options[0]) #else: # # default is '1' # self.observation_resize_variable.set(self.observation_resize_options[0]) self.display_pixel_per_mm_variable = tk.DoubleVar() self.pixel_per_mm_var = tk.DoubleVar() if previous_variables != None: try: self.pixel_per_mm_var.set(float(round(previous_variables['distance factor'],2))) self.known_distance = previous_variables['known distance'] self.display_pixel_per_mm_variable.set(self.pixel_per_mm_var.get()) except KeyError: self.pixel_per_mm_var.set(0) self.known_distance = 1 #self.debug_mode_var = False self.debug_mode_resize = 2 else: self.pixel_per_mm_var.set(0) self.known_distance = 'none' # todo - check, i dont think this is actually used anywhere right now # All the variable we'll need for the preview frame constructions # to assign the class afterwards #self.debug_mode = None #if previous_variables != None: # try: # self.debug_mode_var = previous_variables['debug mode'] # self.debug_mode_resize = previous_variables['debug mode resize'] # self.preview_resize = previous_variables['preview resize'] # except: # self.debug_mode_var = False # self.debug_mode_resize = 2 # self.preview_resize = 2 #else: # self.debug_mode_var = False # self.debug_mode_resize = 2 # self.preview_resize = 2 # print('set debug mode to ' + repr(self.debug_mode_var)) self.debug_mode_var = tk.StringVar() # todo - save in json and read again, for now just self.debug_mode_var.set('OFF') # get vr_arena variable in the correct resolution resolution_height = int(self.resolution.split("x")[0]) resolution_width = int(self.resolution.split("x")[1]) self.vr_arena = np.zeros((resolution_width, resolution_height)) self.overlay_bool = False self.overlay = None self.overlay_image = None self.placed_animal = None self.animal_annotation = None self.vr_arena_name = None self.path = None self.framerate_read_from_experiment_settings = None self.pixel_per_mm_read_from_experiment_settings = None self.organism_read_from_experiment_settings = None self.genotype_read_from_experiment_settings = None self.resolution_read_from_experiment_settings = None 
self.recording_time_read_from_experiment_settings = None self.entered_text = None self.convert_image_output_variable = tk.StringVar() self.convert_image_output_options = ('.npy', '.avi') self.convert_image_output_variable.set(self.convert_image_output_options[0]) self.playback_speed_options = ('0.1X', '0.5X', '1X', '2X', '5X', 'Fastest') self.playback_speed_var = tk.StringVar() self.playback_speed_var.set(self.playback_speed_options[2]) self.time_dependent_stimulation_file = None # This is only used for the tools part. Whenever the user selects a folder it will be global self.data_path = None self.experimental_metadata = None self.vr_arena_multidimensional = False self.multiple_folder_analysis_var = tk.IntVar() self.animal_detection_method_var = tk.StringVar() # todo - implement previous variables if previous_variables is not None and 'Animal Detection Mode' in previous_variables: self.animal_detection_method_var.set(previous_variables['Animal Detection Mode']) else: self.animal_detection_method_var.set('Mode 1') # make this a bit nicer #self.child_window_optimize_image = None self.auto_exposure_button = None self.child_high_power_LED = None # textvariable for the entry for path self.path_entry_variable = tk.StringVar() if previous_variables != None: try: self.path_entry_variable.set(previous_variables['Path']) except KeyError: self.path_entry_variable.set('/home/pi/Desktop/') else: self.path_entry_variable.set('/home/pi/Desktop/') # textvariable for entry of framerate self.framerate_entry_variable = tk.StringVar() if previous_variables != None: try: self.framerate_entry_variable.set(previous_variables['Recording framerate']) except KeyError: # be sure that is correct - e.g. if user fiddles manually in settings file she might set it to zero self.framerate_entry_variable.set('30') else: self.framerate_entry_variable.set('30') # textvariable for genotype self.genotype_entry_variable = tk.StringVar() if previous_variables != None: #try: if 'Exp. group' in previous_variables: self.genotype_entry_variable.set(previous_variables['Exp. 
group']) elif 'genotype' in previous_variables: self.genotype_entry_variable.set(previous_variables['genotype']) #except: # self.genotype_entry_variable.set('myExpGroup') else: self.genotype_entry_variable.set('myExpGroup') # textvariable for recordingtime self.recording_time_variable = tk.StringVar() if previous_variables != None: try: self.recording_time_variable.set(previous_variables['Recording length']) except: self.recording_time_variable.set('20') else: self.recording_time_variable.set('20') self.recording_days_variable = tk.StringVar() self.recording_days_variable.set('0') self.recording_hours_variable = tk.StringVar() self.recording_hours_variable.set('0') self.recording_minutes_variable = tk.StringVar() self.recording_minutes_variable.set('0') self.between_time_days = tk.StringVar() self.between_time_days.set('0') self.between_time_hours = tk.StringVar() self.between_time_hours.set('0') self.between_time_minutes = tk.StringVar() self.between_time_minutes.set('0') self.between_time_seconds = tk.StringVar() self.between_time_seconds.set('0') # variable to keep name of time dependent stimulus file self.time_dependent_stim_file_name = tk.StringVar() # better not the save and re-load when # variable to display timedep file user has selected self.chosen_timedep_file_display_var = tk.StringVar() self.chosen_timedep_file_display_var.set(None) # for image sequence recording give option to record different formats self.image_sequence_format_var = tk.StringVar() self.image_sequence_format_var.set('jpg') # todo, make saveable in saved_variables.json self.image_sequence_format_options = ('jpg', 'png', 'rgb', 'yuv', 'rgba') self.vr_update_rate = 1 # This variable is necessary to ensure that each selected arena (if high power LED setup) only gets inverted once! self.invert_arena_bool = True # Used to track user provided intensity self.vr_intensity_adjustment_variable = tk.StringVar() # ALWAYS set to 100% when program starts!!! self.vr_intensity_adjustment_variable.set('100') # Cam preview on or off self.preview_bool = False # body part defining vr stimulation self.vr_stim_loc_var = tk.StringVar() # set to head as standard if previous_variables is not None \ and 'VR Body Part Stimulation' in previous_variables: self.vr_stim_loc_var.set( previous_variables['VR Body Part Stimulation']) else: self.vr_stim_loc_var.set('Head') # output file options. Bools to either save or not save # the npy files etc. 
self.save_centroids_npy = tk.BooleanVar() if previous_variables is not None: if 'save centroids npy' in previous_variables: self.save_centroids_npy.set(previous_variables['save centroids npy']) else: self.save_centroids_npy.set(0) # standard is OFF else: self.save_centroids_npy.set(0) # standard is OFF self.save_heads_npy = tk.BooleanVar() if previous_variables is not None: if 'save heads npy' in previous_variables: self.save_heads_npy.set(previous_variables['save heads npy']) else: self.save_heads_npy.set(0) # standard is OFF else: self.save_heads_npy.set(0) # standard is OFF self.save_tails_npy = tk.BooleanVar() if previous_variables is not None: if 'save tails npy' in previous_variables: self.save_tails_npy.set(previous_variables['save tails npy']) else: self.save_tails_npy.set(0) # standard is OFF self.save_midpoints_npy = tk.BooleanVar() if previous_variables is not None: if 'save midpoints npy' in previous_variables: self.save_midpoints_npy.set(previous_variables['save midpoints npy']) else: self.save_midpoints_npy.set(0) # standard is OFF else: self.save_midpoints_npy.set(0) # standard is OFF self.save_bbox_npy = tk.BooleanVar() if previous_variables is not None: if 'save bbox npy' in previous_variables: self.save_bbox_npy.set(previous_variables['save bbox npy']) else: self.save_bbox_npy.set(0) # standard is OFF else: self.save_bbox_npy.set(0) # standard is OFF self.save_stim_npy = tk.BooleanVar() if previous_variables is not None: if 'save stim npy' in previous_variables: self.save_stim_npy.set(previous_variables['save stim npy']) else: self.save_stim_npy.set(0) # standard is OFF else: self.save_stim_npy.set(0) # standard is OFF self.save_thresh_npy = tk.BooleanVar() if previous_variables is not None: if 'save sm_thresh npy' in previous_variables: self.save_thresh_npy.set(previous_variables['save sm_thresh npy']) else: self.save_thresh_npy.set(1) # standard is ON else: self.save_thresh_npy.set(1) # standard is ON self.save_skel_npy = tk.BooleanVar() if previous_variables is not None: if 'save sm_skeletons npy' in previous_variables: self.save_skel_npy.set(previous_variables['save sm_skeletons npy']) else: self.save_skel_npy.set(1) # standard is ON else: self.save_skel_npy.set(1) # standard is ON self.online_undistort_bool = tk.BooleanVar() if previous_variables is not None: if 'online undistort bool' in previous_variables: self.online_undistort_bool.set(previous_variables['online undistort bool']) else: self.online_undistort_bool.set(1) # standard is ON else: self.online_undistort_bool.set(1) # standard is ON # if cv2 not installed disable no matter what online undistort bool says if not CV2_INSTALLED: self.online_undistort_bool.set(0) self.undistort_radiobutton_var = tk.IntVar() if previous_variables is not None: if 'undistort radiobutton var' in previous_variables: self.undistort_radiobutton_var.set(previous_variables['undistort radiobutton var']) else: self.undistort_radiobutton_var.set(0) # standard is standard path self.undistort_path = None if self.undistort_radiobutton_var.get() == 0: self.undistort_path = \ Path(Path(SOFTWARE_PATH), 'undistort_matrices', 'standard') elif self.undistort_radiobutton_var.get() == 1: self.undistort_path = \ Path(Path(SOFTWARE_PATH), 'undistort_matrices', 'user_provided') self.undistort_dst_file = None self.undistort_mtx_file = None # Created using the dst and mtx file in 'grab_undistort_files' self.newcameramtx = None self.opencv_information_box = None class SubFrames(): """ This class is used to construct all the different frames (windows that 
are displayed in the GUI). To keep the code short different parts of the windows are re-used. Using the boolean switches defined in the __init__ different parts of the frame can be called which then constructs a given frame. """ def __init__(self, camera_class, controller, cam_frame=False, subframe_preexperiment=False, misc_frame=False, observation_mode=False, distance_configuration=False, model_organism=False, update_metadata=False, exp_ctr_frame=False,start_text='Not defined', experiment_name='Not defined', frame=None, VR_arena=False, quit_frame=False, display_experiment_settings=False, fix_metadata=False, convert_images_frame=False, time_dependent_stim_Frame=False, dynamic_VR_arena=False, timelapse_frame=False): self.controller = controller self.common_variables = CommonVariables(camera_class) self.camera_class = camera_class self.cam = self.camera_class.cam self.sub_subframe_cam = None if cam_frame: #self.camera_frame_func(subframe_preexperiment=subframe_preexperiment) self.camera_frame_func(frame=frame) if misc_frame: self.misc_frame_func(subframe_preexperiment=subframe_preexperiment, observation_mode=observation_mode, distance_configuration=distance_configuration, model_organism=model_organism, update_metadata=update_metadata) self.experiment_control_frame_bool = exp_ctr_frame if self.experiment_control_frame_bool: self.experiment_control_frame( frame, VR_arena=VR_arena, start_text=start_text, experiment_name=experiment_name, dynamic_VR_arena = dynamic_VR_arena, timelapse_frame = timelapse_frame ) if quit_frame: self.quit_frame_func(frame) if display_experiment_settings: self.display_experiment_settings_frame_func( subframe_preexperiment, fix_metadata=fix_metadata ) if convert_images_frame: self.convert_images_frame_func(subframe_preexperiment) if time_dependent_stim_Frame: self.time_dependent_stimulus_frame(subframe_preexperiment) def experiment_control_frame(self, frame, VR_arena=False, start_text='Not defined', experiment_name='Not defined', dynamic_VR_arena=False, timelapse_frame=False): self.subframe_expcontrol = tk.LabelFrame(frame, text=experiment_name, font='Helvetica 18 bold') self.subframe_expcontrol.grid(row=1, column=1) self.sub_subframe_tracking = tk.LabelFrame(self.subframe_expcontrol, text='') self.sub_subframe_tracking.grid(row=7, column=0, columnspan=2) self.sub_subframe_exp_setting = tk.LabelFrame(self.subframe_expcontrol, text='') self.sub_subframe_exp_setting.grid(row=6, column=0,columnspan=2) # fix dimesions of the frame - otherwise have ugly re-sizing when long or short names if RASPBERRY: self.sub_subframe_exp_setting.configure(height=70, width=400) else: self.sub_subframe_exp_setting.configure(height=70, width=300) self.sub_subframe_exp_setting.grid_propagate(0) self.genotypelabel = tk.Label(self.subframe_expcontrol, text='Exp. 
Group:') self.genotypelabel.grid(row=2, column=0) self.genotype_entry = tk.Entry(self.subframe_expcontrol, width=30, textvariable=self.controller.all_common_variables.genotype_entry_variable) self.genotype_entry.grid(row=2, column=1) if VR_arena: if dynamic_VR_arena: self.select_vr_arena_button = tk.Button( self.subframe_expcontrol, text='Select VR arena', command=lambda: self.controller.all_common_functions.select_vr_arena_func(dynamic_VR=True) ) else: self.select_vr_arena_button = tk.Button( self.subframe_expcontrol, text='Select VR arena', command=lambda: self.controller.all_common_functions.select_vr_arena_func(dynamic_VR=False) ) self.select_vr_arena_button.grid(row=3, column=0) if not dynamic_VR_arena: self.downscale_arena_label = tk.Label( self.subframe_expcontrol, text = 'Adjust Power[%]') self.downscale_arena_label.grid(row=4, column=0) self.downscale_arena_entry = tk.Entry( self.subframe_expcontrol, width=5, textvariable=self.controller.all_common_variables.vr_intensity_adjustment_variable) self.downscale_arena_entry.grid(row=5, column=0) # call the plotting library and plot the empty arena into a figure self.fig = Figure(figsize=(2, 1)) self.ax_vr_arena = self.fig.add_subplot(111) if not dynamic_VR_arena: self.image_of_arena = self.ax_vr_arena.imshow( self.controller.all_common_variables.vr_arena, vmin=0, vmax=PWM_RANGE ) else: self.image_of_arena = self.ax_vr_arena.imshow( self.controller.all_common_variables.vr_arena, vmin=0, vmax=255 ) # move the axis to the left (-0.3 points for now) # todo make nicer pos1 = self.ax_vr_arena.get_position() # get the original position pos2 = [pos1.x0 - 0.2, pos1.y0, pos1.width / 1.0, pos1.height / 1.0] self.ax_vr_arena.set_position(pos2) # set a new position # Now adding the colorbar # todo make nicer # [xpos, ypos, probably limitx and limit y) cbaxes = self.fig.add_axes([0.6, 0.1, 0.03, 0.8]) self.fig.colorbar(self.image_of_arena, cax=cbaxes) #self.fig.colorbar(self.image_of_arena) # turn off the labels and axis to save space self.ax_vr_arena.axes.get_xaxis().set_ticks([]) self.ax_vr_arena.axes.get_yaxis().set_ticks([]) # bind the plot to the GUI self.canvas = FigureCanvasTkAgg(self.fig, master=self.subframe_expcontrol) self.canvas.draw() self.canvas.get_tk_widget().grid(row=3, column=1, columnspan=1, rowspan=3) # plt.tight_layout() if RASPBERRY or virtual_raspberry: path_label = tk.Label(self.subframe_expcontrol, text='Save in:') path_label.grid(row=0, column=0, columnspan=1) # instead of an entry use button: path_to_save_experiment = tk.Button(self.subframe_expcontrol, text='Press to select folder', command = self.controller.all_common_functions.select_folder_to_save_exp_func, textvariable= self.controller.all_common_variables.path_entry_variable, width = 30) path_to_save_experiment.grid(row=0, column=1, columnspan=1) else: # in case not on Raspberry self.pathlabel = tk.Label(self.subframe_expcontrol, textvariable=self.controller.all_common_variables.image_path) self.pathlabel.grid(row=0, column=0) pathbutton = tk.Button(self.subframe_expcontrol, text='Press to select data to analyze', command=self.controller.all_common_functions.select_images) pathbutton.grid(row=1, column=0) if not RASPBERRY and not virtual_raspberry: self.display_frameratelabel = tk.Label(self.subframe_expcontrol, text='display framerate') self.display_frameratelabel.grid(row=2, column=1) self.display_framerate = tk.Text(self.subframe_expcontrol, height=1, width=5) self.display_framerate.grid(row=3, column=1) if previous_variables is not None: try: 
self.display_framerate.insert(tk.END, previous_variables['display framerate']) except: self.display_framerate.insert(tk.END, '2') else: self.display_framerate.insert(tk.END, '2') if RASPBERRY or virtual_raspberry: if timelapse_frame: # Create another subframe as it looks nicer self.subframe_total_time = tk.LabelFrame( self.sub_subframe_tracking, text='Recording Time') self.subframe_total_time.grid(row=0, column=0) # Days self.recording_days_label = tk.Label( self.subframe_total_time, text='Days') self.recording_days_label.grid(row=1, column=0) self.recording_days_entry = tk.Entry( self.subframe_total_time, width=5, textvariable=self.controller.all_common_variables.recording_days_variable) self.recording_days_entry.grid(row=2, column=0) # HOURS self.recording_hours_label = tk.Label( self.subframe_total_time, text='Hours') self.recording_hours_label.grid(row=1,column=1) self.recording_hours_entry = tk.Entry( self.subframe_total_time, width=5, textvariable=self.controller.all_common_variables.recording_hours_variable) self.recording_hours_entry.grid(row=2, column=1) # MINUTES self.recording_minutes_label = tk.Label( self.subframe_total_time, text='Minutes') self.recording_minutes_label.grid(row=1,column=2) self.recording_minutes_entry = tk.Entry( self.subframe_total_time, width=5, textvariable=self.controller.all_common_variables.recording_minutes_variable) self.recording_minutes_entry.grid(row=2, column=2) # TIME BETWEEN IMAGES # Create another subframe as it looks nicer self.subframe_time_between = tk.LabelFrame( self.sub_subframe_tracking, text='Time between Images') self.subframe_time_between.grid(row=1, column=0) # DAYS self.between_time_days_label = tk.Label( self.subframe_time_between, text='Days') self.between_time_days_label.grid(row=4, column=0) self.between_time_days_entry = tk.Entry( self.subframe_time_between, width=5, textvariable=self.controller.all_common_variables.between_time_days) self.between_time_days_entry.grid(row=5, column=0) # HOURS self.between_time_hours_label = tk.Label( self.subframe_time_between, text='Hours') self.between_time_hours_label.grid(row=4, column=1) self.between_time_hours_entry = tk.Entry( self.subframe_time_between, width=5, textvariable=self.controller.all_common_variables.between_time_hours) self.between_time_hours_entry.grid(row=5, column=1) # MINUTES self.between_time_minutes_label = tk.Label( self.subframe_time_between, text='Minutes') self.between_time_minutes_label.grid(row=4, column=2) self.between_time_minutes_entry = tk.Entry( self.subframe_time_between, width=5, textvariable=self.controller.all_common_variables.between_time_minutes) self.between_time_minutes_entry.grid(row=5, column=2) # SECONDS self.between_time_seconds_label = tk.Label( self.subframe_time_between, text='Seconds') self.between_time_seconds_label.grid(row=4, column=3) self.between_time_seconds_entry = tk.Entry( self.subframe_time_between, width=5, textvariable=self.controller.all_common_variables.between_time_seconds) self.between_time_seconds_entry.grid(row=5, column=3) else: self.recordingtime_label = tk.Label(self.sub_subframe_tracking, text='Recording Time[s]') self.recordingtime_label.grid(row=0, column=0) self.recordingtime_entry = tk.Entry(self.sub_subframe_tracking, width=5, textvariable=self.controller.all_common_variables.recording_time_variable) self.recordingtime_entry.grid(row=1, column=0) # Currently this seems to be the best way - this function # should be defined in each class as it can be widely # different between classes (both in which module/function 
to # call as well as arguments passed). # The current solution is to outsource this to the # controller which then calls the function (which must # always have the same name: "start_experiment_function") and # it should be obvious and easy to understand which function # with which arguments is called in each window. self.start_tracking_button = tk.Button( self.sub_subframe_tracking, text=start_text, command=lambda: self.controller.call_start_experiment_function( self.controller.current_window)) if timelapse_frame: self.start_tracking_button.grid(row=6, column=0, columnspan=3) else: self.start_tracking_button.grid(row=2, column=0) if experiment_name == 'Image Sequence' or timelapse_frame: image_format_frame = tk.Frame( self.sub_subframe_tracking, borderwidth=2, relief='groove') image_format_frame.grid(row=0, column=1, rowspan=3) image_format_label = tk.Label( image_format_frame, text='Image Format') image_format_label.grid(row=0, column=0) image_format_option = tk.OptionMenu( image_format_frame, self.controller.all_common_variables.image_sequence_format_var, *self.controller.all_common_variables.image_sequence_format_options) image_format_option.grid(row=1, column=0) # print pixel per mm next to start experiment button self.pixel_per_mm_label_left = tk.Label( self.sub_subframe_exp_setting, text='Pixel/mm: ') self.pixel_per_mm_label_left.grid(row=0, column=0, sticky='W') self.pixel_per_mm_label_right = tk.Label( self.sub_subframe_exp_setting, textvariable=self.controller.all_common_variables.pixel_per_mm_var) self.pixel_per_mm_label_right.grid(row=0, column=1, sticky='W') ''' self.debug_mode_label_left = tk.Label(self.sub_subframe_exp_setting, text='Debug Mode: ') self.debug_mode_label_left.grid(row=2, column=0, sticky='W') if experiment_name == 'Online Tracking' \ or experiment_name == 'Closed Loop stimulation': self.debug_mode_label_right = tk.Label( self.sub_subframe_exp_setting, textvariable=self.controller.all_common_variables.debug_mode_var) else: self.debug_mode_label_right = tk.Label(self.sub_subframe_exp_setting, text='NA') self.debug_mode_label_right.grid(row=2, column=1, sticky='W') ''' # Show the body part that has been defined as the defining # x/y position for the presentation of the virtual arena self.vr_stim_part_label_left = tk.Label( self.sub_subframe_exp_setting, text='VR stim at:') self.vr_stim_part_label_left.grid(row=2, column=0, sticky='W') if experiment_name == 'Closed Loop stimulation'\ or experiment_name == 'Dynamic VR': self.vr_stim_part_label_right = tk.Label( self.sub_subframe_exp_setting, textvariable=self.controller.all_common_variables.vr_stim_loc_var ) else: self.vr_stim_part_label_right = tk.Label( self.sub_subframe_exp_setting, text='N/A') self.vr_stim_part_label_right.grid(row=2, column=1, sticky='W') show_framerate_label_left = tk.Label(self.sub_subframe_exp_setting, text='Framerate:') show_framerate_label_left.grid(row=1, column=0, sticky='W') show_framerate_label_right = tk.Label( self.sub_subframe_exp_setting, textvariable=self.controller.all_common_variables.framerate_entry_variable) show_framerate_label_right.grid(row=1, column=1, sticky='W') self.animal_detection_method_label_left = tk.Label( self.sub_subframe_exp_setting, text='Animal Detection:') self.animal_detection_method_label_left.grid(row=0, column=2, sticky='W') if experiment_name == 'Online Tracking' \ or experiment_name == 'Closed Loop stimulation' \ or experiment_name == 'Dynamic VR': self.animal_detection_method_label_right = tk.Label( self.sub_subframe_exp_setting, 
textvariable=self.controller.all_common_variables.animal_detection_method_var) else: self.animal_detection_method_label_right = tk.Label( self.sub_subframe_exp_setting, text='N/A') self.animal_detection_method_label_right.grid( row=0, column=3, sticky='W') show_resolution_label_left = tk.Label( self.sub_subframe_exp_setting, text='Cam Resolution:') show_resolution_label_left.grid(row=1, column=2, sticky='W') show_resolution_label_right = tk.Label( self.sub_subframe_exp_setting, textvariable=self.controller.all_common_variables.resolution_variable) show_resolution_label_right.grid(row=1, column=3, sticky='W') # print organism next to start experiment button self.organism_label_left = tk.Label( self.sub_subframe_exp_setting, text='Animal: ') self.organism_label_left.grid(row=2,column=2, sticky='W') self.organims_label_right = tk.Label( self.sub_subframe_exp_setting, textvariable = self.controller.all_common_variables.model_organism_variable) self.organims_label_right.grid(row=2, column=3, sticky='W') def quit_frame_func(self, frame): """ This function handles the options in "File" of the menubar. """ self.subframe_quit = tk.LabelFrame(frame, text='') self.subframe_quit.grid(row=8, column=0, sticky='W') self.apply_button = tk.Button( self.subframe_quit, text='Save changes', fg='blue', command=lambda: self.controller.all_common_functions.quit_sequence( save=True, exit=False)) self.apply_button.grid(row=0, column=0) self.exit_button = tk.Button( self.subframe_quit, text='Save and Exit', fg='green', command=lambda: self.controller.all_common_functions.quit_sequence( save=True, exit=True)) self.exit_button.grid(row=0, column=1) self.quit = tk.Button( self.subframe_quit, text='Discard and Exit', fg='red', command=lambda: self.controller.all_common_functions.quit_sequence( save=False, exit=True)) self.quit.grid(row=0, column=2) def misc_frame_func(self, subframe_preexperiment=False, observation_mode=False, distance_configuration=False, model_organism=False, update_metadata=False): ''' TODO: I don't think this is necessary - test if everything works if deleted! This function takes the left subframe below the camera control and defines several potential subframes :param subframe_preexperiment: :param observation_mode: If set to True, will construct another subframe called sub_subframe_observation_mode which will host a menu where the user can choose how to display what the algorithm is collecting while the experiment is running. There will also be an option of how large the observation shall be displayed with 1 being the original. The larger, the more CPU power is needed for displaying and the less resources are available for the actual recording/tracking :param distance_configuration: If set to True, will construct another subframe called sub_subframe_config_dist which has a text field that will either display 'None' if the pixel per mm ratio is unknown or the measured pixel per mm ratio. Below that field a button is constructed that will call the function *distance_configuration_func* from the *CommonFunctions* class through the controller. :param model_organism: If set to True, will construct a subframe called sub_subframe_model_organism which has an option menu with which the user will be able to select the model organism she's working with. 
:return: ''' self.sub_subframe_misc = tk.LabelFrame(subframe_preexperiment, text='Other options') self.sub_subframe_misc.grid(row=2, column=0) #if distance_configuration: # # Then the buttons are constructed # self.sub_subframe_config_dist = tk.LabelFrame(self.sub_subframe_misc, text='Pixel per mm') # self.sub_subframe_config_dist.grid(row=0, column=0) # # self.distance_label = tk.Label(self.sub_subframe_config_dist, # textvariable=self.controller.all_common_variables.display_pixel_per_mm_variable) # self.distance_label.grid(row=0, column=1) # self.controller.all_common_variables.display_pixel_per_mm_variable.set( # repr(self.controller.all_common_variables.pixel_per_mm_var.get())) # # self.distance_button = tk.Button(self.sub_subframe_config_dist, text='configure dist', # command=self.controller.all_common_functions.distance_configuration_func) # self.distance_button.grid(row=1, column=1) #if model_organism: # self.sub_subframe_model_organism = tk.LabelFrame(self.sub_subframe_misc, text='Choose the organism') # self.sub_subframe_model_organism.grid(row=0, column=1) # # self.model_organism_menu = tk.OptionMenu(self.sub_subframe_model_organism, # self.controller.all_common_variables.model_organism_variable, # *self.controller.all_common_variables.available_model_organisms) # self.model_organism_menu.grid(row=0, column=0) #if observation_mode: # self.sub_subframe_observation_mode = tk.LabelFrame(self.sub_subframe_misc, text='Preview mode') # self.sub_subframe_observation_mode.grid(row=0, column=2) # # self.previewmode_menu = tk.OptionMenu(self.sub_subframe_observation_mode, # self.controller.all_common_variables.experiment_observation_mode, # *self.common_variables.available_modes) # self.previewmode_menu.grid(row=0, column=0, columnspan=2) # # self.preview_resize_label = tk.Label(self.sub_subframe_observation_mode, text='Magnification') # self.preview_resize_label.grid(row=1, column=0) # # self.preview_size_menu = tk.OptionMenu(self.sub_subframe_observation_mode, # self.controller.all_common_variables.observation_resize_variable, # *self.common_variables.observation_resize_options) # self.preview_size_menu.grid(row=1, column=1) if update_metadata: self.sub_subframe_update_metadata = tk.LabelFrame(self.sub_subframe_misc, text='Update Metadata') self.sub_subframe_update_metadata.grid(row=0, column=3) self.update_metadata_button = tk.Button(self.sub_subframe_update_metadata, text='Press to save metadata', command=self.controller.all_common_functions.overwrite_metadata) self.update_metadata_button.grid(row=0, column=0) self.adjust_arena_to_animal = None def display_experiment_settings_frame_func(self, subframe_preexperiment, fix_metadata=False): self.display_experiment_settings_frame = tk.LabelFrame(subframe_preexperiment, text='Experiment Settings') self.display_experiment_settings_frame.grid(row=1, column=0) self.pathbutton = tk.Button(self.display_experiment_settings_frame, text='Select data to analyze', command=lambda: self.controller.all_common_functions.select_images( display_settings=True)) self.pathbutton.grid(row=0, column=0, columnspan=2) self.pathchosen = tk.Label(self.display_experiment_settings_frame, text='no data chosen yet') self.pathchosen.grid(row=1, column=0, columnspan=2) self.experiment_date_n_time_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Experiment Date/Time') self.experiment_date_n_time_frame.grid(row=2, column=0, sticky='nsew') self.experiment_date_n_time_label = tk.Label(self.experiment_date_n_time_frame, text='') 
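        # The labels created below start out empty; they are populated later by
        # CommonFunctions.select_images(display_settings=True), which reads the
        # "experiment_settings.json" file in the selected experiment folder.
        # A condensed sketch of that lookup pattern (the real code further below
        # uses one try/except KeyError block per key; the keys shown are the ones
        # used there):
        #
        #   with open('experiment_settings.json') as f:
        #       metadata = json.load(f)
        #   for key, label in [('Framerate', self.framerate_label),
        #                      ('Resolution', self.resolution_label),
        #                      ('Pixel per mm', self.pixel_per_mm_label)]:
        #       label.configure(text=metadata.get(key, 'No Data'))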
self.experiment_date_n_time_label.grid(row=0, column=0) if fix_metadata: self.experiment_date_n_time_update_button = \ tk.Button(self.experiment_date_n_time_frame, text='Change', command=lambda: self.controller.all_common_functions.metadata_fix_date_time()) self.experiment_date_n_time_update_button.grid(row=1, column=0) self.genotype_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Exp. group') #self.genotype_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Genotype') self.genotype_frame.grid(row=3, column=0, sticky='nsew') self.genotype_label = tk.Label(self.genotype_frame, text='') self.genotype_label.grid(row=0, column=0) self.species_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Species') self.species_frame.grid(row=4, column=0, sticky='nsew') self.species_label = tk.Label(self.species_frame, text='') self.species_label.grid(row=0, column=0) if fix_metadata: self.update_metadata_organism_button = tk.Button(self.species_frame, text='Change', command=lambda: self.controller.all_common_functions.metadata_fix_organism()) self.update_metadata_organism_button.grid(row=1, column=0) self.resolution_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Resolution') self.resolution_frame.grid(row=5, column=0, sticky='nsew') self.resolution_label = tk.Label(self.resolution_frame, text='') self.resolution_label.grid(row=0, column=0) self.recording_time_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Recorded Time') self.recording_time_frame.grid(row=2, column=1, sticky='nsew') self.recording_time_label = tk.Label(self.recording_time_frame, text='') self.recording_time_label.grid(row=0, column=0) self.framerate_label_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Framerate') self.framerate_label_frame.grid(row=3, column=1, sticky='nsew') self.framerate_label = tk.Label(self.framerate_label_frame, text='') self.framerate_label.grid(row=0, column=0) self.timestamps_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Timestamps') self.timestamps_frame.grid(row=4, column=1, sticky='nsew') self.timestamps_label = tk.Label(self.timestamps_frame, text='') self.timestamps_label.grid(row=0, column=0) self.pixel_per_mm_frame = tk.LabelFrame(self.display_experiment_settings_frame, text='Pixel per mm') self.pixel_per_mm_frame.grid(row=5, column=1, sticky='nsew') self.pixel_per_mm_label = tk.Label(self.pixel_per_mm_frame, text='') self.pixel_per_mm_label.grid(row=0, column=0) if fix_metadata: self.update_metadata_pixel_per_mm_button = tk.Button( self.pixel_per_mm_frame, text='Change', command=self.controller.all_common_functions.distance_configuration_func) self.update_metadata_pixel_per_mm_button.grid(row=1, column=0) def convert_images_frame_func(self, subframe_preexperiment): """ This function is used to construct the frame "Simulate online tracking". There is a commented part that would allow the user to select particular sets of images to use for tracking. Ideally image selection should now be done in 'Tools->Image data handling'. Afterwards, the 'Simulate online tracking' can be used. """ self.convert_images_frame = tk.LabelFrame(subframe_preexperiment, text='') self.convert_images_frame.grid(row=2, column=0) ''' # I think for most people this is confusing, take out for now # Define how to look for images self.common_string_frame = tk.LabelFrame( self.convert_images_frame, text='Define names of exp. 
images') self.common_string_frame.grid(row=0, column=0, columnspan=3) self.common_str_label = tk.Label( self.common_string_frame, text='common image string ' '\n(image*.jpg, *.png...)') self.common_str_label.grid(row=1, column=0) self.common_str = tk.Entry(self.common_string_frame, width=8) self.common_str.grid(row=2, column=0) # Better to have the user explicitly call for an update - as the folders with images is likely to be really full # the updating might take a noticable amount of time (I tested 4500images, took several seconds) self.common_str_update_button = tk.Button(self.common_string_frame, text='update # of images', command=self.controller.all_common_functions.common_str_update_func) self.common_str_update_button.grid(row=1, column=1) self.common_str_frame = tk.LabelFrame(self.common_string_frame, text='Number of Images') self.common_str_frame.grid(row=2, column=1) self.common_str_counted = tk.Label(self.common_str_frame, text='') self.common_str_counted.grid(row=5, column=0) ''' ############# # DONT DELETE YET # button that lets user convert the jpgs in a given folder to a npy array explicitly #self.convert_single_images_frame = tk.LabelFrame(self.convert_images_frame, text='Conversion') #self.convert_single_images_frame.grid(row=0, column=1) # #self.convert_single_images_choose_output_label = tk.Label(self.convert_single_images_frame, # text='Target Format') #self.convert_single_images_choose_output_label.grid(row=0, column=0) # #self.convert_single_images_choose_output_option = tk.OptionMenu(self.convert_single_images_frame, # self.controller.all_common_variables.convert_image_output_variable, # *self.controller.all_common_variables.convert_image_output_options) #self.convert_single_images_choose_output_option.grid(row=1, column=0) # #self.convert_single_images_button = tk.Button(self.convert_single_images_frame, text='Convert single images', # command=self.controller.all_common_functions.convert_single_images_func) #self.convert_single_images_button.grid(row=3, column=0) self.playback_speed_label = tk.Label( self.convert_images_frame, text='Playback speed') self.playback_speed_label.grid(row=1, column=0) self.playback_speed_optionmenu = tk.OptionMenu( self.convert_images_frame, self.controller.all_common_variables.playback_speed_var, *self.controller.all_common_variables.playback_speed_options ) self.playback_speed_optionmenu.grid(row=2, column=0) self.animal_detection_method_simulation_label_top = tk.Label( self.convert_images_frame, text='Animal Detection') self.animal_detection_method_simulation_label_top.grid(row=1, column=1) self.animal_detection_method_stimulation_label_bottom = \ tk.Label(self.convert_images_frame, textvariable=self.controller.all_common_variables.animal_detection_method_var) self.animal_detection_method_stimulation_label_bottom.grid(row=2, column=1) ''' self.debug_model_label_top = tk.Label(self.convert_images_frame, text='Debug Mode:') self.debug_model_label_top.grid(row=1, column=2) self.debug_model_label_bottom = tk.Label( self.convert_images_frame, textvariable=self.controller.all_common_variables.debug_mode_var) self.debug_model_label_bottom.grid(row=2, column=2) ''' self.track_button = tk.Button( subframe_preexperiment, text='Track single animal', command=lambda: self.controller.call_start_experiment_function(self.controller.current_window)) self.track_button.grid(row=3, column=0, columnspan=2) def time_dependent_stimulus_frame(self, frame): self.subframe_timedep = tk.LabelFrame( self.subframe_expcontrol, text='Time dependent Stimulation') 
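        # The file-name label below is bound to a controller-owned tk.StringVar
        # (chosen_timedep_file_display_var): the widget never stores the value
        # itself, it simply mirrors whatever the shared variable holds, so
        # select_time_dependent_stim_func only has to call .set() on the variable.
        # Minimal standalone sketch of that tkinter mechanism (the file path is a
        # placeholder, not a real PiVR path):
        #
        #   import tkinter as tk
        #   root = tk.Tk()
        #   chosen_file = tk.StringVar(value='None selected')
        #   tk.Label(root, textvariable=chosen_file).pack()
        #   chosen_file.set('/path/to/stimulus_file')  # bound label updates immediately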
self.subframe_timedep.grid(row=3, column=0, columnspan=2) self.select_timedep_stim = tk.Button( self.subframe_timedep, text='Select Time Dependent Stim File', command=self.controller.all_common_functions.select_time_dependent_stim_func) self.select_timedep_stim.grid(row=0, column=0) #self.chosen_timedep_file = tk.Label(self.subframe_timedep, text='None selected') self.chosen_timedep_file_label = tk.Label( self.subframe_timedep, textvariable=self.controller.all_common_variables.chosen_timedep_file_display_var) self.chosen_timedep_file_label.grid(row=1, column=0) # def batch_analysis(self, subframe_preexperiment): # """ # Create the frame with the batch tools option # :param subframe_preexperiment: # :return: # """ # self.multilplecheck = tk.Checkbutton(self, text='Batch Analysis', # variable = self.controller.all_common_variables.multiple_folder_analysis_var, # onvalue=True, offvalue=False) # self.controller.all_common_variables.multiple_folder_analysis_var.set(False) # todo - make saveable using previous variables # self.multiplecheck.grid() def camera_frame_func(self, frame): if RASPBERRY or virtual_raspberry: self.sub_subframe_cam = tk.LabelFrame(frame, text='Camera Control', font='Helvetica 18 bold') self.sub_subframe_cam.grid(row=0, column=1, sticky='W') # the camera ON button self.cam_on_button = tk.Button(self.sub_subframe_cam, text='Cam On', fg='green', command=self.controller.all_common_functions.cam_on_func) self.cam_on_button.grid(row=0, column=1) # the camera OFF button self.cam_off_button = tk.Button(self.sub_subframe_cam, text='Cam Off', fg='red', command=self.controller.all_common_functions.cam_off_func) self.cam_off_button.grid(row=0, column=2) # the preview size scale self.preview_size_scale = tk.Scale(self.sub_subframe_cam, from_=180, to=1000, resolution=20, label='Preview Window Size', variable=self.controller.all_common_variables.preview_window_size_variable, orient='horizontal', len=180) self.preview_size_scale.grid(row=0, column=0, columnspan=1, rowspan=2) self.preview_size_scale.set(180) class CommonFunctions(): def __init__(self, camera_class, controller, private=False): self.controller = controller self.camera_class = camera_class self.cam = self.camera_class.cam def update_framerate_func(self, new_framerate): if RASPBERRY: # The camera can only support a given framerate at a given resolution. 
            # See full docs here:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html
            # I'm pretty sure that the Camera (F) from Waveshare is a v1 module as
            # 5MP are indicated (which is pretty much 2592x1944).
            if self.controller.all_common_variables.resolution == '640x480':
                if new_framerate <= 90 and new_framerate > 0:
                    self.cam.framerate = new_framerate
                else:
                    tk.messagebox.showerror(
                        "Error",
                        "With resolution of 640x480 the \nframerate must be between 1 and 90")
                    print('With resolution of 640x480 the framerate must be between 1 and 90')
            elif self.controller.all_common_variables.resolution == '1024x768':
                if new_framerate <= 42 and new_framerate > 0:
                    self.cam.framerate = new_framerate
                else:
                    tk.messagebox.showerror(
                        "Error",
                        "With resolution of 1024x768 the \nframerate must be between 1 and 42")
                    print('With resolution of 1024x768 the framerate must be between 1 and 42')
            elif self.controller.all_common_variables.resolution == '1296x972':
                if new_framerate <= 42 and new_framerate > 0:
                    self.cam.framerate = new_framerate
                else:
                    tk.messagebox.showerror(
                        "Error",
                        "With resolution of 1296x972 the \nframerate must be between 1 and 42")
                    print('With resolution of 1296x972 the framerate must be between 1 and 42')
            elif self.controller.all_common_variables.resolution == '1920x1080':
                if new_framerate <= 30 and new_framerate > 0:
                    self.cam.framerate = new_framerate
                else:
                    tk.messagebox.showerror(
                        "Error",
                        "With resolution of 1920x1080 the \nframerate must be between 1 and 30")
                    print('With resolution of 1920x1080 the framerate must be between 1 and 30')
            elif self.controller.all_common_variables.resolution == '2592x1944':
                if new_framerate <= 15 and new_framerate > 0:
                    self.cam.framerate = new_framerate
                else:
                    tk.messagebox.showerror(
                        "Error",
                        "With resolution of 2592x1944 the \nframerate must be between 1 and 15")
                    print('With resolution of 2592x1944 the framerate must be between 1 and 15')
        else:
            tk.messagebox.showwarning(
                "Information",
                "With resolution of 640x480 \nthe framerate must be between 1 and 90")
            print('you have pressed the update framerate button')

    def menu_callback_shared(self, cam, backlight, analog_output_one):  # , backlight, analog_output_one):#, backlight,analog_output_one):
        self.controller.after(700, lambda: self.menu_callback_shared(
            cam, backlight, analog_output_one))
        if cam:
            if RASPBERRY:
                # if self.cam.framerate != int(self.controller.sub_frames.recording_framerate.get("1.0", 'end-1c')):
                #     if int(self.controller.sub_frames.recording_framerate.get("1.0", 'end-1c')) > 90 or int(
                #             self.controller.sub_frames.recording_framerate.get("1.0", 'end-1c')) < 1:
                #         print('please enter a framerate between 1 and 90')
                #     else:
                #         self.cam.framerate = int(self.controller.sub_frames.recording_framerate.get("1.0", 'end-1c'))
                #         print('set Framerate to: +' + repr(self.cam.framerate))
                if self.controller.all_common_variables.resolution != self.controller.all_common_variables.resolution_variable.get():
                    if self.controller.all_common_variables.resolution_variable.get() == '640x480':
                        self.controller.all_common_variables.resolution = '640x480'
                        self.cam.resolution = (640, 480)
                        self.controller.all_common_variables.pixel_per_mm_var.set(0)
                        print('Changed resolution to 640x480')
                    if self.controller.all_common_variables.resolution_variable.get() == '1024x768':
                        self.controller.all_common_variables.resolution = '1024x768'
                        self.cam.resolution = (1024, 768)
                        self.controller.all_common_variables.pixel_per_mm_var.set(0)
                        print('Changed resolution to 1024x768')
                    if self.controller.all_common_variables.resolution_variable.get() == '1296x972':
self.controller.all_common_variables.resolution = '1296x972' self.cam.resolution = (1296, 972) self.controller.all_common_variables.pixel_per_mm_var.set(0) print('Changed resolution to 1296x730') if self.controller.all_common_variables.resolution_variable.get() == '1920x1080': self.controller.all_common_variables.resolution = '1920x1080' self.cam.resolution = (1920, 1080) self.controller.all_common_variables.pixel_per_mm_var.set(0) print('Changed resolution to 1920x1080') #if self.controller.all_common_variables.resolution_variable.get() == '2592x1944': # self.controller.all_common_variables.resolution = '2592x1944' # self.cam.resolution = (2592, 1944) # self.controller.all_common_variables.pixel_per_mm_var.set(0) # print('Changed resolution to 2592x1944') # In case the arena has been defined it needs to be # removed as different resolutions require different arenas! if self.controller.all_common_variables.overlay is not None: #self.controller.all_common_functions.preview_overlay_func() try: self.controller.all_common_variables.cam.remove_overlay( self.controller.all_common_variables.overlay) except Exception as ex: print('Tried removing overlay. This happened:') print(ex) # then set the overlay_image... self.controller.all_common_variables.overlay_image = None # ... and the overlay variable back to None self.controller.all_common_variables.overlay = None self.controller.all_common_variables.vr_arena = None self.controller.all_common_variables.vr_arena_name = None # always remove the animal that was placed before try: self.controller.all_common_variables.animal_annotation.remove() except (AttributeError, ValueError): pass # Finally, remove the arena in the GUI if self.controller.all_common_variables.resolution == '640x480': width, height = 640, 480 elif self.controller.all_common_variables.resolution == '1024x768': width, height = 1024, 768 elif self.controller.all_common_variables.resolution == '1296x972': width, height = 1296, 972 self.controller.access_subframes( self.controller.current_window).sub_frames.image_of_arena.set_data( np.zeros((height, width))) self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.figure.canvas.draw() # Check whether the user has used the scale to change preview window size by comparing the variable that # the preview window size is currently set to, to the value that the variable that is bound to the scale if self.controller.all_common_variables.preview_window_size_value != \ self.controller.all_common_variables.preview_window_size_variable.get(): # If the current preview window size value is different to the variable that is bound to the scale first # change the preview window size value to the value defined by the variable self.controller.all_common_variables.preview_window_size_value = \ self.controller.all_common_variables.preview_window_size_variable.get() # To be a bit more concise, define the size variable size = self.controller.all_common_variables.preview_window_size_value # then change the preview window size to the requested size self.cam.preview_window = (0, 0, int(size), int(size) ) # After changing the preview window size, the script checks if a VR overlay is present. If yes # it's size needs to be adjusted as well! 
if self.controller.all_common_variables.overlay is not None: self.controller.all_common_variables.overlay.window = \ (0, 0, int(size), int(size) ) if backlight: if RASPBERRY: # Check whether the user has used the scale to change backlight intensity by comparing the variable that # the background intensity is currently set to, to the value that the variable that is bound to the scale if self.controller.all_common_variables.backlight_intensity_value != \ self.controller.all_common_variables.backlight_intensity_variable.get(): # If the current backlight intensity value is different to the variable that is bound to the scale self.controller.all_common_variables.backlight_intensity_value = \ self.controller.all_common_variables.backlight_intensity_variable.get() # set the PWM dutycycle to value chosen by user. # Manual: Starts hardware PWM on a GPIO at the specified frequency and dutycycle. # http://abyz.me.uk/rpi/pigpio/python.html#hardware_PWM #self.controller.all_common_variables.pwm_object.hardware_PWM(BACKGROUND_GPIO, 3000000, # self.controller.all_common_variables.backlight_intensity_value) for i in range(len(self.controller.all_common_variables.background)): if self.controller.all_common_variables.background[i][1] > MAX_PWM_FREQ: self.controller.all_common_variables.pwm_object.hardware_PWM( self.controller.all_common_variables.background[i][0], self.controller.all_common_variables.background[i][1], self.controller.all_common_variables.backlight_intensity_value) else: self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( self.controller.all_common_variables.background[i][0], int(round(self.controller.all_common_variables.backlight_intensity_value* (self.controller.all_common_variables.pwm_range/1000000)))) # Check whether the user has used the scale to change backlight intensity by comparing the variable that # the background intensity is currently set to, to the value that the variable that is bound to the scale if self.controller.all_common_variables.backlight_two_intensity_value != \ self.controller.all_common_variables.backlight_two_intensity_variable.get(): # If the current backlight intensity value is different to the variable that is bound to the scale self.controller.all_common_variables.backlight_two_intensity_value = \ self.controller.all_common_variables.backlight_two_intensity_variable.get() # set the PWM dutycycle to value chosen by user. # Manual: Starts hardware PWM on a GPIO at the specified frequency and dutycycle. 
# http://abyz.me.uk/rpi/pigpio/python.html#hardware_PWM for i in range(len(self.controller.all_common_variables.background_two)): if self.controller.all_common_variables.background_two[i][1] > MAX_PWM_FREQ: self.controller.all_common_variables.pwm_object.hardware_PWM( self.controller.all_common_variables.background_two[i][0], self.controller.all_common_variables.background_two[i][1], self.controller.all_common_variables.backlight_two_intensity_value) else: self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( self.controller.all_common_variables.background_two[i][0], int(round(self.controller.all_common_variables.backlight_two_intensity_value* (self.controller.all_common_variables.pwm_range/1000000)))) print('backlight intensity value updated to: ' + repr(self.controller.all_common_variables.backlight_two_intensity_value)) if analog_output_one: if RASPBERRY: # Check whether the user has used the scale to change analog output intensity by comparing the variable that # the analog output intensity is currently set to, to the value that the variable that is bound to the scale if self.controller.all_common_variables.channel_one_dutycycle != \ self.controller.all_common_variables.channel_one_variable.get(): # If the current analog output intensity value is different to the variable that is bound to the scale self.controller.all_common_variables.channel_one_dutycycle = \ self.controller.all_common_variables.channel_one_variable.get() for i in range(len(self.controller.all_common_variables.channel_one)): if self.controller.all_common_variables.channel_one[i][1] > MAX_PWM_FREQ: self.controller.all_common_variables.pwm_object.hardware_PWM( gpio=self.controller.all_common_variables.channel_one[i][0], PWMfreq=self.controller.all_common_variables.channel_one[i][1], PWMduty=int(self.controller.all_common_variables.channel_one_dutycycle* (1000000/self.controller.all_common_variables.pwm_range))) # NOTE: Not sure if this is the best way as it might mask problems for users! else: if self.controller.all_common_variables.high_power_LEDs_bool.get(): self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_one[i][0], dutycycle=int(self.controller.all_common_variables.pwm_range - self.controller.all_common_variables.channel_one_dutycycle)) else: self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_one[i][0], dutycycle=self.controller.all_common_variables.channel_one_dutycycle) if self.controller.all_common_variables.channel_two_dutycycle != \ self.controller.all_common_variables.channel_two_variable.get(): # If the current analog output intensity value is different to the variable that is bound to the scale self.controller.all_common_variables.channel_two_dutycycle = \ self.controller.all_common_variables.channel_two_variable.get() for i in range(len(self.controller.all_common_variables.channel_two)): if self.controller.all_common_variables.channel_two[i][1] > MAX_PWM_FREQ: self.controller.all_common_variables.pwm_object.hardware_PWM( gpio=self.controller.all_common_variables.channel_two[i][0], PWMfreq=self.controller.all_common_variables.channel_two[i][1], PWMduty=int(self.controller.all_common_variables.channel_two_dutycycle* (1000000/self.controller.all_common_variables.pwm_range))) # NOTE: Not sure if this is the best way as it might mask problems for users! 
else: if self.controller.all_common_variables.high_power_LEDs_bool.get(): self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_two[i][0], dutycycle=int(self.controller.all_common_variables.pwm_range - self.controller.all_common_variables.channel_two_dutycycle)) else: self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_two[i][0], dutycycle=self.controller.all_common_variables.channel_two_dutycycle) if self.controller.all_common_variables.channel_three_dutycycle != \ self.controller.all_common_variables.channel_three_variable.get(): # If the current analog output intensity value is different to the variable that is bound to the scale self.controller.all_common_variables.channel_three_dutycycle = \ self.controller.all_common_variables.channel_three_variable.get() for i in range(len(self.controller.all_common_variables.channel_three)): if self.controller.all_common_variables.channel_three[i][1] > MAX_PWM_FREQ: self.controller.all_common_variables.pwm_object.hardware_PWM( gpio=self.controller.all_common_variables.channel_three[i][0], PWMfreq=self.controller.all_common_variables.channel_three[i][1], PWMduty=int(self.controller.all_common_variables.channel_three_dutycycle* (1000000/self.controller.all_common_variables.pwm_range))) # NOTE: Not sure if this is the best way as it might mask problems for users! else: if self.controller.all_common_variables.high_power_LEDs_bool.get(): self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_three[i][0], dutycycle=int(self.controller.all_common_variables.pwm_range - self.controller.all_common_variables.channel_three_dutycycle)) else: self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_three[i][0], dutycycle=self.controller.all_common_variables.channel_three_dutycycle) if self.controller.all_common_variables.channel_four_dutycycle != \ self.controller.all_common_variables.channel_four_variable.get(): # If the current analog output intensity value is different to the variable that is bound to the scale self.controller.all_common_variables.channel_four_dutycycle = \ self.controller.all_common_variables.channel_four_variable.get() for i in range(len(self.controller.all_common_variables.channel_four)): if self.controller.all_common_variables.channel_four[i][1] > MAX_PWM_FREQ: self.controller.all_common_variables.pwm_object.hardware_PWM( gpio=self.controller.all_common_variables.channel_four[i][0], PWMfreq=self.controller.all_common_variables.channel_four[i][1], PWMduty=int(self.controller.all_common_variables.channel_four_dutycycle* (1000000/self.controller.all_common_variables.pwm_range))) # NOTE: Not sure if this is the best way as it might mask problems for users! 
else: if self.controller.all_common_variables.high_power_LEDs_bool.get(): self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_four[i][0], dutycycle=int(self.controller.all_common_variables.pwm_range - self.controller.all_common_variables.channel_four_dutycycle)) else: self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=self.controller.all_common_variables.channel_four[i][0], dutycycle=self.controller.all_common_variables.channel_four_dutycycle) # if self.controller.sub_frames.distance_label.cget('text') != \ # repr(self.controller.all_common_variables.pixel_per_mm)[0:10]: # print('changing to ' + repr(self.controller.all_common_variables.pixel_per_mm)[0:10]) # self.controller.sub_frames.distance_label.configure( # text=repr(self.controller.all_common_variables.pixel_per_mm)[0:10]) # print(self.controller.sub_frames.distance_label.cget('text')) if self.controller.all_common_variables.experiment_observation_mode_output != \ self.controller.all_common_variables.experiment_observation_mode.get(): # todo - make into loop - less readability but easier to implement different versions # todo - somethings wrong with this - it doesn't seem to react to user input # for experiment_observation_mode_loop in range(len(self.controller.all_common_variables.available_modes)): # todo - doesn't change...why? if self.controller.all_common_variables.experiment_observation_mode.get() == 'only animal': self.controller.all_common_variables.experiment_observation_mode_output = 'only animal' print('only animal chosen') if self.controller.all_common_variables.experiment_observation_mode.get() == 'overview': self.controller.all_common_variables.experiment_observation_mode_output = 'overview' print('overview chosen') if self.controller.all_common_variables.experiment_observation_mode.get() == 'none': self.controller.all_common_variables.experiment_observation_mode_output = 'none' print('no preview will be displayed') if self.controller.all_common_variables.experiment_observation_mode.get() == 'debug': self.controller.all_common_variables.experiment_observation_mode_output = 'debug' print('debug mode selected') if self.controller.all_common_variables.model_organism_value != \ self.controller.all_common_variables.model_organism_variable.get(): self.controller.all_common_variables.model_organism_value = \ self.controller.all_common_variables.model_organism_variable.get() print('Model organism: ' + repr(self.controller.all_common_variables.model_organism_value) + ' selected!') def cam_on_func(self): print('cam on') # todo - delete if in final version if RASPBERRY: self.controller.all_common_variables.preview_bool = True size = int(self.controller.all_common_variables.preview_window_size_value) self.cam.resolution = self.controller.all_common_variables.resolution self.cam.preview_window = (0, 0, size, size) self.cam.zoom = (0, 0, 1, 1) self.cam.start_preview() self.cam.preview.fullscreen = False # if overlay was previously defined, re-create it if self.controller.all_common_variables.overlay is not None: self.controller.all_common_functions.preview_overlay_func() #self.controller.all_common_variables.overlay.layer = 3 # wait a second so the camera adjusts before the user has # the chance to change anything # TODO check if necessary! 
time.sleep(1) def cam_off_func(self): print("cam off") # todo - delete in final version if RASPBERRY: self.controller.all_common_variables.preview_bool = False self.cam.stop_preview() # if overlay is active remove it. if self.controller.all_common_variables.overlay is not None: try: self.controller.all_common_variables.cam.remove_overlay( self.controller.all_common_variables.overlay) except Exception as ex: # This happens when the user presses the "cam # off" button more than once... pass def distance_configuration_func(self): if RASPBERRY: if self.controller.all_common_variables.resolution == '640x480': resolution = [640, 480] if self.controller.all_common_variables.resolution == '1024x768': resolution = [1024, 768] if self.controller.all_common_variables.resolution == '1296x972': resolution = [1296, 972] if self.controller.all_common_variables.resolution == '1920x1080': resolution = [1920, 1080] if self.controller.all_common_variables.resolution == '2592x1944': resolution = [2592, 1944] distance_factor = \ distance_configuration_module.DistanceConfigurationLive(cam=self.cam, resolution=resolution, known_distance=self.controller.all_common_variables.known_distance) else: distance_factor = distance_configuration_module.DistanceConfigurationStatic( known_distance=self.controller.all_common_variables.known_distance) # This variable is the number that is used internally self.controller.all_common_variables.pixel_per_mm_var.set(round(distance_factor.distance_factor,3)) # Check what exactly this variable is doing and write in comments self.controller.all_common_variables.known_distance = distance_factor.known_distance self.controller.all_common_variables.display_pixel_per_mm_variable.set( repr(distance_factor.distance_factor)[0:10]) print('There are ' + repr(self.controller.all_common_variables.pixel_per_mm_var.get()) + ' pixel per mm ') try: # todo - probably don't need that! rounded_value = str(round(self.controller.all_common_variables.pixel_per_mm_var.get(),1)) self.controller.access_subframes( self.controller.current_window).sub_frames.pixel_per_mm_label.configure( text='Pixel/mm: ' + rounded_value) # todo - check if this is the best way to update the pixel_per_mm in the fix_metadata without breaking # Todo - online pixel_per_mm except: pass def select_folder_to_save_exp_func(self): self.controller.all_common_variables.path_entry_variable.set( filedialog.askdirectory(initialdir=self.controller.all_common_variables.path_entry_variable.get())) def select_images(self, display_settings=False): self.controller.all_common_variables.data_path = filedialog.askdirectory() self.controller.all_common_variables.image_path.set(self.controller.all_common_variables.data_path) os.chdir(self.controller.all_common_variables.data_path) if display_settings: self.controller.all_common_variables.path = self.controller.all_common_variables.data_path path_to_show = '...' + self.controller.all_common_variables.data_path[-30:] self.controller.access_subframes( self.controller.current_window).sub_frames.pathchosen.configure(text=path_to_show) try: with open((self.controller.all_common_variables.data_path + '/experiment_settings.json'), 'r') as file: self.controller.all_common_variables.experimental_metadata = json.load(file) # Comment on why I use try..except instead of if..else. It seems that in Python it is prefered to use # the try..except style if one does not expect too many excepts to come true. 
                try:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.experiment_date_n_time_label.configure(
                        text=self.controller.all_common_variables.experimental_metadata['Experiment Date and Time'])
                except KeyError:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.experiment_date_n_time_label.configure(
                        text='No Data')
                try:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.framerate_label.configure(
                        text=repr(self.controller.all_common_variables.experimental_metadata['Framerate']) + 'fps')
                    self.controller.all_common_variables.framerate_read_from_experiment_settings = \
                        self.controller.all_common_variables.experimental_metadata['Framerate']
                except KeyError:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.framerate_label.configure(text='No Data')
                    self.controller.all_common_variables.framerate_read_from_experiment_settings = None
                try:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.recording_time_label.configure(
                        text=repr(
                            self.controller.all_common_variables.experimental_metadata['Recording time']) + 's')
                except KeyError:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.recording_time_label.configure(text='No Data')
                try:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.resolution_label.configure(
                        text=self.controller.all_common_variables.experimental_metadata['Resolution'])
                    self.controller.all_common_variables.resolution_read_from_experiment_settings = \
                        self.controller.all_common_variables.experimental_metadata['Resolution']
                except KeyError:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.resolution_label.configure(text='No Data')
                    self.controller.all_common_variables.resolution_read_from_experiment_settings = None
                try:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.species_label.configure(
                        text=self.controller.all_common_variables.experimental_metadata['Model Organism'])
                    self.controller.all_common_variables.organism_read_from_experiment_settings = \
                        self.controller.all_common_variables.experimental_metadata['Model Organism']
                except KeyError:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.species_label.configure(text='No Data')
                    self.controller.all_common_variables.organism_read_from_experiment_settings = None
                # changed name of 'genotype' to 'exp. group' to be more generic. Reflect that change here and
                # make sure not to break backwards compatibility
                if 'Exp. group' in self.controller.all_common_variables.experimental_metadata:
                    self.controller.access_subframes(
                        self.controller.current_window).sub_frames.genotype_label.configure(
                        text=self.controller.all_common_variables.experimental_metadata['Exp. group'])
                    self.controller.all_common_variables.genotype_read_from_experiment_settings = \
                        self.controller.all_common_variables.experimental_metadata['Exp. 
group'] elif 'Genotype' in self.controller.all_common_variables.experimental_metadata: self.controller.access_subframes( self.controller.current_window).sub_frames.genotype_label.configure( text=self.controller.all_common_variables.experimental_metadata['Genotype']) self.controller.all_common_variables.genotype_read_from_experiment_settings = \ self.controller.all_common_variables.experimental_metadata['Genotype'] #try: # self.controller.access_subframes( # self.controller.current_window).sub_frames.genotype_label.configure( # text=self.controller.all_common_variables.experimental_metadata['Genotype']) # self.controller.all_common_variables.genotype_read_from_experiment_settings = \ # self.controller.all_common_variables.experimental_metadata['Genotype'] #except KeyError: # self.controller.access_subframes( # self.controller.current_window).sub_frames.genotype_label.configure(text='No Data') # self.controller.all_common_variables.genotype_read_from_experiment_settings = None try: self.controller.access_subframes( self.controller.current_window).sub_frames.pixel_per_mm_label.configure( text=round(self.controller.all_common_variables.experimental_metadata['Pixel per mm'],2)) self.controller.all_common_variables.pixel_per_mm_read_from_experiment_settings = \ round(self.controller.all_common_variables.experimental_metadata['Pixel per mm'],2) except KeyError: self.controller.access_subframes( self.controller.current_window).sub_frames.pixel_per_mm_label.configure(text='No Data') self.controller.all_common_variables.pixel_per_mm_read_from_experiment_settings = None except IOError: self.controller.access_subframes( self.controller.current_window).sub_frames.experiment_date_n_time_label.configure(text='No Data') self.controller.access_subframes( self.controller.current_window).sub_frames.framerate_label.configure(text='No Data') self.controller.access_subframes( self.controller.current_window).sub_frames.recording_time_label.configure(text='No Data') self.controller.access_subframes( self.controller.current_window).sub_frames.resolution_label.configure(text='No Data') self.controller.access_subframes( self.controller.current_window).sub_frames.species_label.configure(text='No Data') self.controller.access_subframes( self.controller.current_window).sub_frames.genotype_label.configure(text='No Data') self.controller.access_subframes( self.controller.current_window).sub_frames.pixel_per_mm_label.configure(text='No Data') try: self.timestamps = np.load('timestamps.npy') self.controller.access_subframes( self.controller.current_window).sub_frames.timestamps_label.configure(text='Found') except IOError: self.controller.access_subframes( self.controller.current_window).sub_frames.timestamps_label.configure(text='No Data') def common_str_update_func(self): self.controller.all_common_variables.entered_text = self.controller.access_subframes( self.controller.current_window).sub_frames.common_str.get() files_and_folders_common = [p.replace('\\', '') for p in glob('*' + self.controller.all_common_variables.entered_text + '*')] self.controller.access_subframes(self.controller.current_window).sub_frames.common_str_counted.configure( text=len(files_and_folders_common)) ''' # DONT DELETE YET # function is called when user wants to explicitly convert jpgs to a npy array def convert_single_images_func(self): if self.controller.all_common_variables.path is None: tk.messagebox.showerror("Error", "You have to select a\n" "path in order to convert images") elif self.controller.all_common_variables.entered_text is None: 
user_said_its_ok = tk.messagebox.askokcancel("Warning", "Do you only have images in the folder?\n" "If yes, press 'OK', otherwise 'Cancel'\n" "and enter a string for the extension of\n" "of the images!") if self.controller.all_common_variables.path is not None and \ (self.controller.all_common_variables.entered_text is not None or user_said_its_ok): # Figure out how many files must be read files_to_read = [p.replace('\\', '') for p in glob('*' + self.controller.access_subframes( self.controller.current_window).sub_frames.common_str.get("1.0", 'end-1c') + '*')] sorted_filed_to_read = natsorted(files_to_read) temp = imread(files_to_read[0]) height, width = temp.shape[0], temp.shape[1] """ # Todo!! Write a correction algorithm for missing frames (especially if very fast recording!) if self.timestamps is not None: # first calculate the time it took to take each frame # Note: The timestamps are not the true time the picture was taken - after the picture is taken the # time is written down before the next frame is taken. So the timestamps are not 'true' but give us a good # hint when the frame was captured timestamps_subtracted = self.timestamps[1:,0] - np.roll(self.timestamps[:,0],1)[1:] if timestamps_subtracted.any() < (1/self.framerate)*2: """ if self.controller.all_common_variables.convert_image_output_variable.get() == '.npy': """ saving as mmep as the advantage that one can build arrays that are larger than the avaiable memory The drawback is that how it's currently implemented it will take much more SD space than a normal numpy array """ array = np.zeros((height, width, len(files_to_read)), dtype=temp.dtype) # with open("All_images.npy", 'w') as image_file: # mmapData = np.memmap("all_images.npy", mode='w+', shape=(height, width, len(files_to_read)), # dtype=temp.dtype) for i in range(len(sorted_filed_to_read)): # print(i) temp = imread(sorted_filed_to_read[i]) try: array[:, :, i] = temp[:, :, 0] # mmapData[:, :, i] = temp[:, :, 0] except IndexError: # in case the image is only grayscale array[:, :, i] = temp np.save('all_images.npy', array) elif self.controller.all_common_variables.convert_image_output_variable.get() == '.avi': """ not trivial - still needs to be implemented!! fig = Figure() ax = fig.add_subplot(111) ax.set_aspect('equal') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) image = ax.imshow(temp, cmap='gray', interpolation='nearest') #im.set_clim([0, 1]) #fig.set_size_inches([5, 5]) #tight_layout() def update_img(i): tmp = np.random(300, 300) image.set_data(tmp) #temp = imread(sorted_filed_to_read[i]) #image.set_data(temp) return image # legend(loc=0) ani = animation.FuncAnimation(fig, update_img, 300, interval=30) writer = animation.writers['ffmpeg'](fps=30) ani.save('demo.mp4', writer=writer, dpi=100) return ani """ print('Still needs to be implemented!') ''' def quit_sequence(self, save=True, exit=True): if RASPBERRY or virtual_raspberry: if save: variables = {'Path': self.controller.all_common_variables.path_entry_variable.get(), 'Recording framerate': int(self.controller.all_common_variables.framerate_entry_variable.get()), 'Resolution': self.controller.all_common_variables.resolution, 'Recording length': int(self.controller.all_common_variables.recording_time_variable.get()), 'Background dutycycle': self.controller.all_common_variables.backlight_intensity_value, 'Background 2 dutycycle' : self.controller.all_common_variables.backlight_two_intensity_value, 'Exp. 
group' : self.controller.all_common_variables.genotype_entry_variable.get(), 'Observation mode': self.controller.all_common_variables.experiment_observation_mode_output, 'Window': self.controller.current_window, 'Organism': self.controller.all_common_variables.model_organism_value, 'Background': self.controller.all_common_variables.background, 'Background 2' : self.controller.all_common_variables.background_two, 'Channel 1': self.controller.all_common_variables.channel_one, 'Channel 2': self.controller.all_common_variables.channel_two, 'Channel 3': self.controller.all_common_variables.channel_three, 'Channel 4': self.controller.all_common_variables.channel_four, 'Channel 1 dutycycle': self.controller.all_common_variables.channel_one_dutycycle, 'Channel 2 dutycycle': self.controller.all_common_variables.channel_two_dutycycle, 'Channel 3 dutycycle': self.controller.all_common_variables.channel_three_dutycycle, 'Channel 4 dutycycle': self.controller.all_common_variables.channel_four_dutycycle, 'High Power LEDs': self.controller.all_common_variables.high_power_LEDs_bool.get(), 'Animal Detection Mode': self.controller.all_common_variables.animal_detection_method_var.get(), 'VR Body Part Stimulation': self.controller.all_common_variables.vr_stim_loc_var.get(), 'Animal Signal': self.controller.all_common_variables.signal, 'save centroids npy': self.controller.all_common_variables.save_centroids_npy.get(), 'save heads npy': self.controller.all_common_variables.save_heads_npy.get(), 'save tails npy': self.controller.all_common_variables.save_tails_npy.get(), 'save midpoints npy': self.controller.all_common_variables.save_midpoints_npy.get(), 'save bbox npy': self.controller.all_common_variables.save_bbox_npy.get(), 'save stim npy': self.controller.all_common_variables.save_stim_npy.get(), 'save sm_thresh npy': self.controller.all_common_variables.save_thresh_npy.get(), 'save sm_skeletons npy': self.controller.all_common_variables.save_skel_npy.get(), 'online undistort bool ': self.controller.all_common_variables.online_undistort_bool.get(), 'undistort radiobutton var': self.controller.all_common_variables.undistort_radiobutton_var.get(), 'opencv_information_box': self.controller.all_common_variables.opencv_information_box } try: if self.cam.exposure_mode == 'off': variables['exposure time'] = self.cam.exposure_speed else: # if autoexposure is on set the shutter speed to zero which allows it to float variables['exposure time'] = 0 except AttributeError: # if not on Raspberry an Attribute Error indicates that the cam object which in that case is # a string does not have exposure speed as a attribute pass if self.controller.all_common_variables.pixel_per_mm_var.get() != 0: variables['distance factor'] = self.controller.all_common_variables.pixel_per_mm_var.get() variables['known distance'] = self.controller.all_common_variables.known_distance try: variables['bbox size'] = int(self.boxsize.get("1.0", 'end-1c')) except: pass with open((self.controller.path_of_program + '/saved_variables.json'), 'w') as file: json.dump(variables, file, sort_keys=True, indent=4) if not RASPBERRY and not virtual_raspberry: if save: # thoughts - it's best to just save environments as json files - + is that different user can use # the same setup and not having to always set up the software again. 
# currently one can only save from the main window (tracking) as this is the one that has all the variables # change this and make it more specific for each window #variables = {'recording framerate': int( # self.controller.access_subframes(self.controller.current_window).sub_frames.recording_framerate.get( # "1.0", 'end-1c')), # # 'display framerate': int(self.controller.sub_frames.display_framerate.get("1.0", 'end-1c')), # 'observation mode': self.controller.all_common_variables.experiment_observation_mode_output, # 'Window': self.controller.current_window #} variables = {'Window': self.controller.current_window, 'save centroids npy': self.controller.all_common_variables.save_centroids_npy.get(), 'save heads npy': self.controller.all_common_variables.save_heads_npy.get(), 'save tails npy': self.controller.all_common_variables.save_tails_npy.get(), 'save midpoints npy': self.controller.all_common_variables.save_midpoints_npy.get(), 'save bbox npy': self.controller.all_common_variables.save_bbox_npy.get(), 'save stim npy': self.controller.all_common_variables.save_stim_npy.get(), 'save sm_thresh npy': self.controller.all_common_variables.save_thresh_npy.get(), 'save sm_skeletons npy': self.controller.all_common_variables.save_skel_npy.get(), 'online undistort bool': self.controller.all_common_variables.online_undistort_bool.get(), 'undistort radiobutton var': self.controller.all_common_variables.undistort_radiobutton_var.get(), 'opencv_information_box': self.controller.all_common_variables.opencv_information_box } #if self.controller.all_common_variables.pixel_per_mm_var is not None: # variables['distance factor'] = self.controller.all_common_variables.pixel_per_mm_var.get() # variables['known distance'] = self.controller.all_common_variables.known_distance #print('saved the following variable: ' + variables['Window']) with open((self.controller.path_of_program + '/saved_variables.json'), 'w') as file: json.dump(variables, file, sort_keys=True, indent=4) # with open(('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/saved_variables.json'), # 'w') as file: # json.dump(variables, file, sort_keys=True, indent=4) if exit: # quit causes mainloop to stop, but keeps interpreter intact. app.quit() def update_software(self): """ This function calls the shell script "update_PiVR" located in the folder "Installation_update". This shell script will: 1) sudo apt-get update the Raspberry Pi 2) cd into the PiVR directory and git pull the newest version of the software from gitlab """ if RASPBERRY: if tk.messagebox.askokcancel('Update?', 'This will stop the PiVR software to update! You ' 'will have to restart the software after the ' 'update.\nDo you want to continue?'): # Safe the current path temporarilly current_path = os.path.abspath(os.getcwd()) # cd to the software path os.chdir(SOFTWARE_PATH) result = subprocess.run( ['/bin/bash', '-c', '/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/Installation_update/update_PiVR.sh']) if result.returncode == 0: tk.messagebox.showinfo( 'Success!', 'Was able to connect to gitlab to download ' 'newest version!\n' 'Please restart software for update to take effect!') elif result.returncode == 1: tk.messagebox.showinfo( 'Failed!', 'Unable to connect to https://gitlab.com' '\nPlease make sure you are connected to the ' 'Internet!') else: print(result.returncode) # cd back to the previously used path. os.chdir(current_path) else: if tk.messagebox.askokcancel( 'Update?', 'This will stop the PiVR software to update! 
You ' 'will have to restart the software after the ' 'update.\nDo you want to continue?'): # Safe the current path temporarilly current_path = os.path.abspath(os.getcwd()) # cd to the software path os.chdir(SOFTWARE_PATH) process = subprocess.Popen(["git", "pull"], stdout=subprocess.PIPE) output = process.communicate() print(output[0]) tk.messagebox.showinfo('Update Information', output[0]) tk.messagebox.showinfo( 'Reminder', 'If successfully downloaded newest version, please ' 'remember to restart the software for updates to ' 'take effect!') # cd back to the previously used path. os.chdir(current_path) def software_info(self): """ Show version (and in future the license) of the software """ child_window_info = tk.Toplevel() child_window_info.wm_title('About') child_window_info.geometry("%dx%d%+d%+d" % (320, 420, 0, 0)) child_window_info.attributes("-topmost", True) # force on top # disable main window child_window_info.grab_set() logo = Image.open( self.controller.path_of_program + '/pics/PiVRLogo.ico').resize((200, 200)) logo_as_photo = ImageTk.PhotoImage(logo) display_logo = tk.Label(child_window_info, image=logo_as_photo) display_logo.image = logo_as_photo display_logo.grid(row=1, column=0, columnspan=2) title = tk.Label(child_window_info, text=__project__) title.config(font=("Arial", 30, "bold")) title.grid(row=0, column=0, columnspan=2) some_description = tk.Label(child_window_info, text='The software powering your ' 'home built RPi tracker') some_description.config(font=('Arial', 10,'italic')) some_description.grid(row=2, column=0, columnspan=2) author_label = tk.Label(child_window_info, text='Written by:') author_label.grid(row=3, column=0) author_read = tk.Label(child_window_info, text=__author__) author_read.grid(row=3,column=1) version_label = tk.Label(child_window_info, text='Version:') version_label.grid(row=4, column=0) version_read = tk.Label(child_window_info, text=__version__) version_read.grid(row=4, column=1) version_date = tk.Label(child_window_info, text='Release Date:') version_date.grid(row=5, column =0) version_data_read = tk.Label(child_window_info, text=__date__) version_data_read.grid(row=5, column=1) git_branch_label = tk.Label(child_window_info, text='Git branch:') git_branch_label.grid(row=6, column=0) git_branch = tk.Label(child_window_info, text=CURRENT_GIT_BRANCH) git_branch.grid(row=6, column=1) git_hash_label = tk.Label(child_window_info, text='Git hash:') git_hash_label.grid(row=7, column=0) git_hash = tk.Label(child_window_info, text = CURRENT_GIT_HASH[0:8]) git_hash.grid(row=7, column=1) license_label = tk.Label(child_window_info, text='License:') license_label.grid(row=8, column = 0) license_BSD = tk.Label(child_window_info, text='BSD-3-Clause') license_BSD.grid(row=8, column=1) """ def autoexp_callback_old(self): ''' The callback menu for the autoexposure button. When the user clicks on the autoexposure button this function will change both what is displayed in the GUI as well as changing the exposure_mode of the camera. NOTE: This has been retired 10th of October 2018 as the button moved from a the main frame to a popup window :return: ''' if self.controller.access_subframes( self.controller.current_window).sub_frames.auto_exposure_button['text'] == 'autoexp. on': self.controller.access_subframes( self.controller.current_window).sub_frames.auto_exposure_button['text'] = 'autoexp. 
off' # todo - delete if line below in final version if RASPBERRY: self.cam.exposure_mode = 'off' print(self.cam.exposure_speed) print('autoexposure: off') else: self.controller.access_subframes( self.controller.current_window).sub_frames.auto_exposure_button['text'] = 'autoexp. on' # todo - delete if line below in final version if RASPBERRY: self.cam.exposure_mode = 'auto' print(self.cam.exposure_speed) print('autoexposure: auto') """ def autoexp_callback(self): """ The callback menu for the autoexposure button. When the user clicks on the autoexposure button this function will change both what is displayed in the GUI as well as changing the exposure_mode of the camera. """ if self.controller.all_common_variables.auto_exposure_button['text'] == 'autoexp. on': self.controller.all_common_variables.auto_exposure_button['text'] = 'autoexp. off' # todo - delete if line below in final version if RASPBERRY: self.cam.exposure_mode = 'off' self.cam.shutter_speed = self.cam.exposure_speed print(self.cam.exposure_speed) print(self.cam.shutter_speed) else: self.controller.all_common_variables.auto_exposure_button['text'] = 'autoexp. on' # todo - delete if line below in final version if RASPBERRY: self.cam.exposure_mode = 'auto' self.cam.shutter_speed = 0 print(self.cam.exposure_speed) print(self.cam.shutter_speed) def select_signal(self): """ This function gets called when the user presses the 'Animal Color' option in the menubar. It will display an example of both dark and white signal and lets the user choose which of two options are correct. """ distance_configuration_module.AnimalColorSelection( path=self.controller.path_of_program, linux=LINUX, controller=self.controller) print(self.controller.all_common_variables.signal) def output_files_func(self): """ Creates the "Options->Output File" menu """ # create a new window and name it self.child = tk.Toplevel() self.child.wm_title('Output files to be saved') self.child.attributes("-topmost", True) # force on top # disable main window self.child.grab_set() explanation_output_files = tk.Label( self.child, justify=tk.LEFT, wraplength=300, text='If you want to have the same output files as the ' 'original version of PiVR you may select the relevant ' 'files here.' ) explanation_output_files.grid(row=0, column=0) ################################################## # Labelframe Data.csv data_csv_labelframe = tk.LabelFrame( self.child, text='Part of data.csv') data_csv_labelframe.grid(row=1, column=0, sticky='W') info_label = tk.Label( data_csv_labelframe, justify=tk.LEFT, wraplength=300, text='Note that no additional information is saved. ' 'This is purely for your convenience.' 
) info_label.grid(row=0, column=0, columnspan=2) # Centroids.npy centroids_checkbutton = tk.Checkbutton( data_csv_labelframe, text='', variable=self.controller.all_common_variables.save_centroids_npy, onvalue=True, offvalue=False) centroids_checkbutton.grid(row=1, column=0) centroids_label = tk.Label( data_csv_labelframe, text='Save "centroids.npy" file') centroids_label.grid(row=1, column=1, sticky='W') # Heads.npy heads_checkbutton = tk.Checkbutton( data_csv_labelframe, text='', variable=self.controller.all_common_variables.save_heads_npy, onvalue=True, offvalue=False) heads_checkbutton.grid(row=2, column=0) heads_label = tk.Label( data_csv_labelframe, text='Save "heads.npy" file') heads_label.grid(row=2, column=1, sticky='W') # Tails.npy tails_checkbutton = tk.Checkbutton( data_csv_labelframe, text='', variable=self.controller.all_common_variables.save_tails_npy, onvalue=True, offvalue=False) tails_checkbutton.grid(row=3, column=0) tails_label = tk.Label( data_csv_labelframe, text='Save "tails.npy" file') tails_label.grid(row=3, column=1, sticky='W') # Midpoint.npy midpoint_checkbutton = tk.Checkbutton( data_csv_labelframe, text='', variable=self.controller.all_common_variables.save_midpoints_npy, onvalue=True, offvalue=False) midpoint_checkbutton.grid(row=4, column=0) midpoint_label = tk.Label( data_csv_labelframe, text='Save "midpoints.npy" file') midpoint_label.grid(row=4, column=1, sticky='W') # bounding_boxes.npy bboxes_checkbutton = tk.Checkbutton( data_csv_labelframe, text='', variable=self.controller.all_common_variables.save_bbox_npy, onvalue=True, offvalue=False) bboxes_checkbutton.grid(row=5,column=0) bbox_label = tk.Label( data_csv_labelframe, text='Save "bounding_boxes.npy" file') bbox_label.grid(row=5, column=1, sticky='W') # stimulation.npy stim_checkbutton = tk.Checkbutton( data_csv_labelframe, text='', variable=self.controller.all_common_variables.save_stim_npy, onvalue=True, offvalue=False) stim_checkbutton.grid(row=6, column=0) stim_label = tk.Label( data_csv_labelframe, text='Save "stimulation.npy" file') stim_label.grid(row=6, column=1, sticky='W') ################################################## # Labelframe binary image and skeleton image_labelframe = tk.LabelFrame( self.child, text='Extra Image Files') image_labelframe.grid(row=2, column=0, sticky='W') warning_label = tk.Label( image_labelframe, justify=tk.LEFT, wraplength=300, text='WARNING! \n' 'To save space it might make sense to not save ' 'the binary and skeleton images.\n' 'Be aware that there is currently no guaranteed ' 'way to reconstruct the binary images and skeletons.', fg = 'red' ) warning_label.grid(row=0, column=0, columnspan=2) # sm_thresh.npy binary_checkbutton = tk.Checkbutton( image_labelframe, text='', variable=self.controller.all_common_variables.save_thresh_npy, onvalue=True, offvalue=False) binary_checkbutton.grid(row=1, column=0) binary_label = tk.Label( image_labelframe, text='Save "sm_thresh.npy" file') binary_label.grid(row=1, column=1, sticky='W') # sm_skeletons.npy skeleton_checkbutton = tk.Checkbutton( image_labelframe, text='', variable=self.controller.all_common_variables.save_skel_npy, onvalue=True, offvalue=False) skeleton_checkbutton.grid(row=2, column=0) skeleton_label = tk.Label( image_labelframe, text='Save "sm_skeletons.npy" file') skeleton_label.grid(row=2, column=1, sticky='W') def undistort_online_func(self): """ This function creates the 'Option->Undistort Online' menu. 
Will only be created if CV2_INSTALLED == True """ # create a new window and name it self.child = tk.Toplevel() self.child.wm_title('Undistort Options') self.child.attributes("-topmost", True) # force on top # disable main window self.child.grab_set() explanation_undistort = tk.Label( self.child, justify=tk.LEFT, wraplength=300, text='The images can have visible distortion, depending ' 'on the lens used. The PiVR tracking algorithm can ' 'correct the extracted coordinates (e.g. centroid) ' 'during tracking. If you present a virtual reality ' 'the undistorted points will be used as position ' 'for the animal' ) explanation_undistort.grid(row=0, column=0) # Labelframe yes_no.csv yes_no_labelframe = tk.LabelFrame( self.child, text='') yes_no_labelframe.grid(row=1, column=0, sticky='W') # yes_no undistort_yes_no = tk.Checkbutton( yes_no_labelframe, text='', variable=self.controller.all_common_variables.online_undistort_bool, onvalue=True, offvalue=False) undistort_yes_no.grid(row=2, column=0) undistort_yes_no_label = tk.Label( yes_no_labelframe, text='Perform Undistort?') undistort_yes_no_label.grid(row=2, column=1, sticky='W') # Labelframe undistort files files_labelframe = tk.LabelFrame( self.child, text='Undistort Files') files_labelframe.grid(row=2, column=0, sticky='W') def undistort_path_func(): """ Function is called when radiobutton is clicked Note - easy to expand to add, e.g. HQ camera! Just add one elif below and copy paste the Radiobutton lines below and adapt the value. Also make sure to adapt the undistort_path initializiation in all_common_variables with then new value """ if self.controller.all_common_variables.undistort_radiobutton_var.get() == 0: self.controller.all_common_variables.undistort_path = \ Path(Path(os.path.realpath(SOFTWARE_PATH)), 'undistort_matrices', 'standard') elif self.controller.all_common_variables.undistort_radiobutton_var.get() == 1: self.controller.all_common_variables.undistort_path = \ Path(Path(os.path.realpath(SOFTWARE_PATH)), 'undistort_matrices', 'user_provided') # standard files standard_files_radiobutton = tk.Radiobutton( files_labelframe, justify=tk.LEFT, text="Use undistort files for standard lens", padx=20, variable=self.controller.all_common_variables.undistort_radiobutton_var, value=0, command=undistort_path_func) standard_files_radiobutton.grid(row=1, column=0,columnspan=1, sticky='W') user_files_radiobutton = tk.Radiobutton( files_labelframe, justify=tk.LEFT, text="Use your own undistort files", padx=20, variable=self.controller.all_common_variables.undistort_radiobutton_var, value=1, command=undistort_path_func) user_files_radiobutton.grid(row=2, column=0,columnspan=1, sticky='W') def select_output_channels(self): """ This function will let the user select which output channel (GPIO#x) corresponds to STIM_1 etc :return: """ output_channels.DefineOutputChannels(path=self.controller.path_of_program, controller=self.controller) def draw_VR_arena(self): """ Calls classes in 'VR_drawing_board.py' to allow user to draw virtual realities at different resolutions. It is probably best to just ask the user for the desired resolution. This is done in 'VR_drawing_board.py' """ VR_drawing_board.SelectResolution( path_of_program=self.controller.path_of_program) def select_time_dependent_stim_func(self): """ This function gets called when the user clicks on the 'Select Time Dependent Stim File' button a recording setting. The user has to choose an csv file. 
        I don't think there will be enough space to actually display
        it, so the only way to display it will be in a special menu
        where one can create the stimulation file.

        Todo: Actually, it might be better to show the stimulus and
        the time...to be discussed!
        """
        try:
            self.controller.all_common_variables.time_dependent_stim_file_name = filedialog.askopenfilename(
                initialdir=self.controller.path_of_program + '/time_dependent_stim',
                title="Select file",
                filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
            # read the file
            self.controller.all_common_variables.time_dependent_stimulation_file = \
                pd.read_csv(self.controller.all_common_variables.time_dependent_stim_file_name,
                            delimiter=',')
            # update the button label: display only the file name,
            # using '/' as separator on Linux/Mac and '\\' on Windows
            if LINUX:
                self.controller.all_common_variables.chosen_timedep_file_display_var.set(
                    self.controller.all_common_variables.time_dependent_stim_file_name.split('/')[-1])
            else:
                self.controller.all_common_variables.chosen_timedep_file_display_var.set(
                    self.controller.all_common_variables.time_dependent_stim_file_name.split('\\')[-1])
            #self.controller.access_subframes(
            #    self.controller.current_window).sub_frames.chosen_timedep_file.configure(
            #    text=name_of_file.split('/')[-1]
            #)
            # todo if want to display the stimulation file, put the code here
        except OSError:
            # if nothing has been selected
            self.controller.all_common_variables.time_dependent_stimulation_file = None
            self.controller.all_common_variables.chosen_timedep_file_display_var.set(None)
            #self.controller.access_subframes(
            #    self.controller.current_window).sub_frames.chosen_timedep_file.configure('None selected')
            pass

    def select_vr_arena_func(self, dynamic_VR=False):
        """
        This function gets called when the user clicks on the
        'Select VR Arena' button in the VR experiment tab.

        The user has to choose an arena and it will be displayed as an
        extra window. Different arenas can be chosen:

        #) A static arena that will just be displayed "as is". Such an
           arena is identified using its name: "XX.csv"

        #) A static arena that will be displayed relative to the
           starting position of the animal without taking the movement
           of the animal into account. Such an arena is identified
           using its name: "XX[320,240].csv". 320 and 240 stand for the
           x and y coordinates. For example, if the animal is at
           exactly 320,240 at the start of the experiment the arena
           will not be translated. If the animal is at 300, 240, the
           virtual arena will be translated by 20 pixels.

        #) A static arena that is both translated and rotated relative
           to the starting position of the animal. Such an arena is
           defined using its name: "XX[320,240,1.5].csv". The first two
           numbers again define the translation while the third number,
           1.5 in this case, defines the angle the arena is oriented
           relative to the movement of the animal.

        #) A dynamic arena. This arena can **not** be rotated nor
           translated. Dynamic arenas are defined as 3 dimensional
           numpy arrays with the third dimension encoding time. The
           dynamic arena is also defined by its name: "XX_Hz[Y].npy"
           with Y being the update rate in Hz.

        This function handles these different types of arenas by saving
        the path to the chosen arena which will then be given to the
        tracking class if the user starts an experiment.

        The function also handles the presentation: if a static arena
        is chosen, the arena is presented in the small preview plot in
        the PiVR software. If no rotation nor translation is desired,
        an overlay is put over the preview window to help the user
        place the animal at the correct place.
Obviously, this doesn't make sense if the arena is anyway translated/rotated afterwards, so no overlay is presented. """ # As soon as user clicks, reset everything. This enables the # user to "unselect" any arena and have a "clean slate". self.controller.all_common_variables.vr_arena_name = None if dynamic_VR: self.controller.all_common_variables.vr_arena_name = \ filedialog.askopenfilename( initialdir=self.controller.path_of_program + '/VR_arenas', title="Select file", filetypes=(("dynamic", "*.npy"), )) else: self.controller.all_common_variables.vr_arena_name = \ filedialog.askopenfilename( initialdir=self.controller.path_of_program + '/VR_arenas', title="Select file", filetypes=(("csv files", "*.csv"), )) # always remove the animal that was placed before try: self.controller.all_common_variables.animal_annotation.remove() except (AttributeError, ValueError): pass # check if overlay image has been defined. If yes, it means # that the user has selected a VR arena since starting up the # software. # If yes, remove the previous overlay if self.controller.all_common_variables.overlay_image is not \ None: try: self.controller.all_common_variables.cam.remove_overlay( self.controller.all_common_variables.overlay) except Exception as ex: print('Tried removing overlay. This happened:') print(ex) # then set the overlay_image... self.controller.all_common_variables.overlay_image = None # ... and the overlay variable back to None self.controller.all_common_variables.overlay = None # The if..elif below just collects the resolution in int if self.controller.all_common_variables.resolution == '640x480': width, height = 640, 480 elif self.controller.all_common_variables.resolution == '1024x768': width, height = 1024, 768 elif self.controller.all_common_variables.resolution == '1296x972': width, height = 1296, 972 # Now check what the user has provided...start with checking # if its a static arena if 'csv' in self.controller.all_common_variables.vr_arena_name: #print('csv in name') self.controller.all_common_variables.vr_arena_multidimensional = \ False # collect the string after animal_pos. If no animal_pos # is defined this will just take the whole string. animal_defined = \ self.controller.all_common_variables.vr_arena_name.split( 'animal_pos')[-1] # load the arena into memory self.controller.all_common_variables.vr_arena = \ np.genfromtxt( self.controller.all_common_variables.vr_arena_name, delimiter=',') # after loading arena into memory check if arena size fits with # current resolution if self.controller.all_common_variables.vr_arena.shape == (height,width): # Now check if the animal is placed..for this it's # necessary to do some string acrobatics try: # This will check if there's a "," and a "[" in the # string. As there's indexing here, this will fail ( # IndexError) if none of the two characters are # present and this will lead to no animal placed! self.controller.all_common_variables.placed_animal = \ [int(animal_defined.split(',')[0].split('[')[1])] # Now check after how many "," a "]" is present. If # after only one, no orientation has been defined. if ']' in animal_defined.split(',')[1]: print('no orientation') self.controller.all_common_variables.placed_animal.append( int(animal_defined.split(',')[1].split(']')[0])) # Else the desired orientation theta has been defined. 
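                    # Illustrative example with a hypothetical file name:
                    # for "my_arena_animal_pos[320,240,1.5].csv" the string
                    # after 'animal_pos' is "[320,240,1.5].csv", so the
                    # string acrobatics here yield
                    # placed_animal == [320, 240, 1.5],
                    # i.e. x = 320, y = 240 and theta = 1.5 radians.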
elif ']' in animal_defined.split(',')[2]: self.controller.all_common_variables.placed_animal.append( int(animal_defined.split(',')[1])) self.controller.all_common_variables.placed_animal.append( float(animal_defined.split(',')[2].split(']')[0])) print('animal with orientation') except IndexError: # When analyzing the string and finding that no # animal position has been defined, the code # continues here. #print('no animal placed') self.controller.all_common_variables.placed_animal = None # only display overlay if **no** animal is placed # Create an empty array which will be used to create # the RGBA image RGB_arena = \ np.zeros(( self.controller.all_common_variables.vr_arena.shape[0], self.controller.all_common_variables.vr_arena.shape[1], 4), dtype=np.uint8) # The image only takes values between 0...255 (uint8) normalized_values = \ self.controller.all_common_variables.vr_arena \ * (255 / PWM_RANGE) # Now fill the empty array with the normalized values RGB_arena[:, :, 0] = np.around(a=(normalized_values)) RGB_arena[:, :, 3] = np.around(a=(normalized_values)) # For resolution other than 640x480 the overlay image must # be padded! padded_image = Image.new( 'RGB', ( ((RGB_arena.shape[1] + 31)//32) * 32, ((RGB_arena.shape[0] + 15)//16) * 16 )) padded_image.paste(Image.fromarray(RGB_arena, 'RGBA'), (0,0)) # And convert the array to an image object self.controller.all_common_variables.overlay_image = padded_image # call the function that puts the overlay over the # preview self.preview_overlay_func() print(self.controller.all_common_variables.placed_animal) # Here the plotting in the small figure in the center of # the PiVR software is done. First, the arena is set self.controller.access_subframes( self.controller.current_window).sub_frames.image_of_arena.set_data( self.controller.all_common_variables.vr_arena) if self.controller.all_common_variables.placed_animal is \ not None: # if only animal is placed, no orientation information # is given - therefore a circle is drawn at the given # position - the animal annotation is a common # variable which enables us to always delete it when # loading a new arena if len(self.controller.all_common_variables.placed_animal) == 2: circle = Circle(( self.controller.all_common_variables.placed_animal[0], self.controller.all_common_variables.placed_animal[1]), 10) self.controller.all_common_variables.animal_annotation = \ self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.add_artist(circle) else: # if theta is given (here it doesn't check for # values between -pi and +pi) first the x and y # position of the thin end of the arrow needs to # be calculated (the inverse of arctan2 with # length = 10) x_before = \ self.controller.all_common_variables.placed_animal[0] \ + 10 \ * np.cos(self.controller.all_common_variables.placed_animal[2]) y_before = \ self.controller.all_common_variables.placed_animal[1] \ + 10 \ * np.sin(self.controller.all_common_variables.placed_animal[2]) # This is where the arrow is plotted self.controller.all_common_variables.animal_annotation = \ self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.arrow( x_before, # x y_before, # y self.controller.all_common_variables.placed_animal[0] - x_before, # dx self.controller.all_common_variables.placed_animal[1] - y_before, # dy head_width=50, head_length=-25, fc='r', ec='r' ) # and finally the plot is updated self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.figure.canvas.draw() else: 
tk.messagebox.showerror( 'Invalid Resolution', 'The arena you selected has resolution ' + repr(self.controller.all_common_variables.vr_arena.shape[1]) + 'x' + repr(self.controller.all_common_variables.vr_arena.shape[0]) + '\nwhile the camera resolution is set to ' + repr(width) + 'x' + repr(height) + '\nPlease select arena with same resolution as camera!') # remove arena from memory self.controller.all_common_variables.vr_arena = None # and remove previously selected arena (if there was one) self.controller.access_subframes( self.controller.current_window).sub_frames.image_of_arena.set_data( np.zeros((height, width))) self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.figure.canvas.draw() # TODO: Shouldn't be necessary in here, right? if self.controller.all_common_variables.high_power_LEDs_bool: self.controller.all_common_variables.invert_arena_bool = True elif 'npy' in self.controller.all_common_variables.vr_arena_name: # This is checking whether the user selected a dynamic # virtual reality arnea # read the file: temp = np.load(self.controller.all_common_variables.vr_arena_name) if temp.shape[0] == 480 and temp.shape[1] == 640: try: self.controller.all_common_variables.vr_update_rate = \ int(self.controller.all_common_variables.vr_arena_name.split('Hz[')[-1].split('].npy')[0]) except ValueError: tk.messagebox.showerror( 'Please define update rate', 'The time-dependent file you have selected does ' 'not specify the update rate.' 'The update rate is defined in the name as' ' the follwing string: ' '\nHz[xxxx].npy ' '\nAn example file name would be: "test_Hz[15].npy"') self.controller.all_common_variables.vr_arena = temp self.controller.all_common_variables.vr_arena_multidimensional = True # TODO: Shouldn't be necessary in here, right? if self.controller.all_common_variables.high_power_LEDs_bool: self.controller.all_common_variables.invert_arena_bool = True # Create an empty array which will be used to create # the RGBA image used to display the overlay over the # preview window RGB_arena = \ np.zeros((self.controller.all_common_variables.vr_arena.shape[0], self.controller.all_common_variables.vr_arena.shape[1], 4), dtype=np.uint8) values = self.controller.all_common_variables.vr_arena[:,:,0] # Now fill the empty array with the values RGB_arena[:, :, 0] = np.around(a=(values)) RGB_arena[:, :, 3] = np.around(a=(values)) # And convert the array to an image object self.controller.all_common_variables.overlay_image = \ Image.fromarray(RGB_arena, 'RGBA') # call the function that puts the overlay over the # preview self.preview_overlay_func() # Here the plotting in the small figure in the center of # the PiVR software is done. First, the arena is set self.controller.access_subframes( self.controller.current_window).sub_frames.image_of_arena.set_data( self.controller.all_common_variables.vr_arena[:,:,0]) # and finally the plot is updated self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.figure.canvas.draw() else: tk.messagebox.showerror( 'Wrong spatial dimensions', 'For a virtual arena that changes over time please' 'only use numpy arrays with the size of ' '480x640xtime.' '\nThe arena you selected has the following ' 'dimensions: ' + repr(temp.shape[0]) + 'x' + repr(temp.shape[1])) else: # If nothing has been selected, clean up the plot. The # overlay, filename variables etc. have already been # reset to zero. 
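            # Illustrative aside (not used by the code): the width/height
            # pairs hard-coded in the if/elif below could equally be derived
            # from the resolution string itself, e.g.
            #     width, height = (int(v) for v in
            #         self.controller.all_common_variables.resolution.split('x'))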
if self.controller.all_common_variables.resolution == '640x480': width,height = 640, 480 elif self.controller.all_common_variables.resolution == '1024x768': width, height = 1024, 768 elif self.controller.all_common_variables.resolution == '1296x972': width, height = 1296, 972 self.controller.access_subframes( self.controller.current_window).sub_frames.image_of_arena.set_data( np.zeros((height,width))) self.controller.access_subframes( self.controller.current_window).sub_frames.ax_vr_arena.figure.canvas.draw() # TODO: Shouldn't be necessary in here, right? if self.controller.all_common_variables.high_power_LEDs_bool: self.controller.all_common_variables.invert_arena_bool = False def preview_overlay_func(self): """ This function is called whenever the virtual arena is supposed to be put on top of the overlay. It adds the overlay_image in all_common_variables to cam. It then adjusts the size of the overlay to the size of the preview window. """ if RASPBERRY: if self.controller.all_common_variables.preview_bool: # Problem: If cam already on and user clicks a second time # a new overlay is added. # To mitigate this, try to remove an hypothetical # overlay try: self.controller.all_common_variables.cam.remove_overlay( self.controller.all_common_variables.overlay) except Exception as ex: # This happens when the user presses the "cam # off" button more than once... pass self.controller.all_common_variables.overlay = \ self.cam.add_overlay( source=self.controller.all_common_variables.overlay_image.tobytes(), layer=3, alpha=150) size = int(self.controller.all_common_variables.preview_window_size_value) self.controller.all_common_variables.overlay.fullscreen = False self.controller.all_common_variables.overlay.window = \ (0, 0, size, size) else: # If user selects an arena while the preview window # is turned off, change the overlay variable from # None to True so that when the preview window is # turned on the overlay is presented! self.controller.all_common_variables.overlay = True elif RASPBERRY and \ self.controller.all_common_variables.resolution != '640x480': tk.messagebox.showerror( 'Wrong spatial dimension', 'For a virtual arena experiment the resolution must ' 'be set to' '\n640x480' '\n' '\nIt is currently set to: ' '\n' + self.controller.all_common_variables.resolution) def overwrite_metadata(self): """ This function is called when the user wants to update the metadata that is saved in each experimental folder in the file experiment_settings.json """ datetime = time.strftime("%d.%m.%Y_%H-%M-%S") try: # open the file with the history of the changed metadata with open((self.controller.all_common_variables.data_path + '/old_experiment_settings.json'), 'r') as file: all_old_experiment_settings = json.load(file) # also open the file that holds the metadata until now with open((self.controller.all_common_variables.data_path + '/experiment_settings.json'), 'r') as file: old_experiment_settings = json.load(file) # extract the metadata of the metadata up to now metadata_before_change = {} try: metadata_before_change['Experiment Date and Time'] = old_experiment_settings['Experiment Date and Time'] except KeyError: pass try: metadata_before_change['Framerate'] = old_experiment_settings['Framerate'] except KeyError: pass try: metadata_before_change['Pixel per mm'] = old_experiment_settings['Pixel per mm'] except KeyError: pass # to keep backwards compatibility after changing the 'genotype' to exp. control if 'Exp. group' in metadata_before_change: metadata_before_change['Exp. 
group'] = old_experiment_settings['Exp. group'] elif 'Genotype' in metadata_before_change: metadata_before_change['Genotype'] = old_experiment_settings['Genotype'] #try: # metadata_before_change['Genotype'] = old_experiment_settings['Genotype'] #except KeyError: # pass try: metadata_before_change['Resolution'] = old_experiment_settings['Resolution'] except KeyError: pass try: metadata_before_change['Recording time'] = old_experiment_settings['Recording time'] except KeyError: pass #try: # metadata_before_change['Bounding box size'] = # old_experiment_settings['Bounding box size'] #except KeyError: # pass try: metadata_before_change['Search box size'] = \ old_experiment_settings['Search box size'] except KeyError: pass try: metadata_before_change['Preview'] = old_experiment_settings['Preview'] except KeyError: pass try: metadata_before_change['Preview size'] = old_experiment_settings['Preview size'] except KeyError: pass try: metadata_before_change['Model Organism'] = old_experiment_settings['Model Organism'] except KeyError: pass # update the json file with all the old metadata all_old_experiment_settings.update(metadata_before_change) # and save it with open((self.controller.all_common_variables.data_path + '/old_experiment_settings.json'), 'w') as file: json.dump(all_old_experiment_settings, file, indent=4) except FileNotFoundError: print(self.controller.all_common_variables.data_path) with open((self.controller.all_common_variables.data_path + '/experiment_settings.json'), 'r') as file: old_experiment_settings = json.load(file) metadata_before_change = {} try: metadata_before_change['Experiment Date and Time'] = old_experiment_settings['Experiment Date and Time'] except KeyError: pass try: metadata_before_change['Framerate'] = old_experiment_settings['Framerate'] except KeyError: pass try: metadata_before_change['Pixel per mm'] = old_experiment_settings['Pixel per mm'] except KeyError: pass # to keep backwards compatibility after changing the 'genotype' to exp. control if 'Exp. group' in metadata_before_change: metadata_before_change['Exp. group'] = old_experiment_settings['Exp. 
group'] elif 'Genotype' in metadata_before_change: metadata_before_change['Genotype'] = old_experiment_settings['Genotype'] #try: # metadata_before_change['Genotype'] = old_experiment_settings['Genotype'] #except KeyError: # pass try: metadata_before_change['Resolution'] = old_experiment_settings['Resolution'] except KeyError: pass try: metadata_before_change['Recording time'] = old_experiment_settings['Recording time'] except KeyError: pass #try: # metadata_before_change['Bounding box size'] = # old_experiment_settings['Bounding box size'] #except KeyError: # pass try: metadata_before_change['Search box size'] = \ old_experiment_settings['Search box size'] except KeyError: pass try: metadata_before_change['Preview'] = old_experiment_settings['Preview'] except KeyError: pass try: metadata_before_change['Preview size'] = old_experiment_settings['Preview size'] except KeyError: pass try: metadata_before_change['Model Organism'] = old_experiment_settings['Model Organism'] except KeyError: pass with open((self.controller.all_common_variables.data_path + '/old_experiment_settings.json'), 'w') as file: json.dump(metadata_before_change, file, indent=4) # open the original file with all the experimental settings with open((self.controller.all_common_variables.data_path + '/experiment_settings.json'), 'r') as file: experiment_settings = json.load(file) # update the old values with the new values experiment_settings['Pixel per mm'] = self.controller.all_common_variables.pixel_per_mm_var.get() experiment_settings['Model Organism'] = self.controller.all_common_variables.experimental_metadata[ 'Model Organism'] experiment_settings['Experiment Date and Time'] = \ self.controller.all_common_variables.experimental_metadata['Experiment Date and Time'] # overwrite the original file with the experimental settings with open((self.controller.all_common_variables.data_path + '/experiment_settings.json'), 'w') as file: json.dump(experiment_settings, file, indent=4) def metadata_fix_date_time(self): """ Function currently not in use. Can be used to allow user interface to change experiment metadata using the GUI. """ # create a new window and name it self.child = tk.Toplevel() self.child.wm_title('Fix Date and Time') self.child.attributes("-topmost", True) # force on top # disable main window self.child.grab_set() # create a frame in the window self.child_frame = tk.Frame(self.child) self.day_label = tk.Label(self.child_frame, text='Day') self.day_label.grid(row=0, column=0) self.day_entry = tk.Entry(self.child_frame, width=5) self.day_entry.grid(row=1, column=0) # enter the original day self.day_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata['Experiment Date and Time']. split('.')[0]) self.month_label = tk.Label(self.child_frame, text='Month') self.month_label.grid(row=0, column=1) self.month_entry = tk.Entry(self.child_frame, width=5) self.month_entry.grid(row=1, column=1) # enter the original month self.month_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata['Experiment Date and Time']. split('.')[1]) self.year_label = tk.Label(self.child_frame, text='Year') self.year_label.grid(row=0, column=2) self.year_entry = tk.Entry(self.child_frame, width=5) self.year_entry.grid(row=1, column=2) # enter the original year self.year_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata['Experiment Date and Time']. 
split('.')[2].split('_')[0]) self.hour_label = tk.Label(self.child_frame, text='Hour') self.hour_label.grid(row=0, column=3) self.hour_entry = tk.Entry(self.child_frame, width=5) self.hour_entry.grid(row=1, column=3) # enter the original hour self.hour_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata['Experiment Date and Time']. split('-')[0].split('_')[1]) self.minutes_label = tk.Label(self.child_frame, text='Minutes') self.minutes_label.grid(row=0, column=4) self.minutes_entry = tk.Entry(self.child_frame, width=5) self.minutes_entry.grid(row=1, column=4) # enter the original hour self.minutes_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata[ 'Experiment Date and Time']. split('-')[1]) self.seconds_label = tk.Label(self.child_frame, text='Seconds') self.seconds_label.grid(row=0, column=5) self.seconds_entry = tk.Entry(self.child_frame, width=5) self.seconds_entry.grid(row=1, column=5) # enter the original hour self.seconds_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata[ 'Experiment Date and Time']. split('-')[2]) self.accept_changes = tk.Button(self.child_frame, text='Accept Changes', command=self.metadata_accept_datetime_changes) self.accept_changes.grid(row=2, column=0, columnspan=3) self.discard_changes = tk.Button(self.child_frame, text='Discard Changes', command=self.metadata_discard_changes) self.discard_changes.grid(row=2, column=3, columnspan=3) self.child_frame.pack() def metadata_accept_datetime_changes(self): """ Function is called to update the date and time metadata. It first checks for valid input and prompts the user to fix the input if it fails this test """ try: if type(int(self.day_entry.get())) and type(int(self.month_entry.get())) and type( int(self.year_entry.get())) \ and type(int(self.hour_entry.get())) and type(int(self.minutes_entry.get())) and \ type(int(self.seconds_entry.get())) is int: # first check if the input is all integer and no text if 0 < int(self.day_entry.get()) <= 31 and \ 0 < int(self.month_entry.get()) <= 12 and \ 1900 < int(self.year_entry.get()) < 2050 and \ 0 <= int(self.hour_entry.get()) < 24 and \ 0 <= int(self.minutes_entry.get()) < 60 and \ 0 <= int(self.seconds_entry.get()) < 60: # then check if the input is generally valid if 0 < int(self.day_entry.get()) <= \ calendar.monthrange(int(self.year_entry.get()), int(self.month_entry.get()))[1]: # then check if the month has as many days as is being claimed' # if all good, assign the new time/date to the main variable. Note - small caveat, this will # remove trailing zeros (e.g. 01 for January will become just 1) self.controller.all_common_variables.experimental_metadata['Experiment Date and Time'] = \ str(int(self.day_entry.get())) + '.' + str(int(self.month_entry.get())) + '.' 
+ \ str(int(self.year_entry.get())) + '_' + str(int(self.hour_entry.get())) + '-' + \ str(int(self.minutes_entry.get())) + '-' + str(int(self.seconds_entry.get())) # set main window active again self.child.grab_release() # close the child window self.child.after(0, self.child.destroy()) # update the date and time in the main window self.controller.access_subframes( self.controller.current_window).sub_frames.experiment_date_n_time_label.configure( text=self.controller.all_common_variables.experimental_metadata['Experiment Date and Time']) else: # the user entered an invalid day relative to the month and year chosen tk.messagebox.showerror('Invalid Entry', 'Please enter a valid date') else: # the user entered an invalid anything - could be day, month, year, hour, minute or second (or all) tk.messagebox.showerror('Invalid Entry', 'Please enter valid numbers') else: # unsure how this could happen, but let's keep it to not have silent errors tk.messagebox.showerror('Invalid Entry', 'Please enter valid numbers in the format\n 01 01 2018 12 12 12\n' 'Also, let me know how you got this error!') except ValueError: # this error happens when the user enters a string tk.messagebox.showerror('Invalid Entry', 'Please enter valid numbers in the format\n 01 01 2018 12 12 12') def metadata_discard_changes(self): # set main window active again self.child.grab_release() # close the child window self.child.after(0, self.child.destroy()) def metadata_accept_organism_changes(self): self.controller.all_common_variables.experimental_metadata['Model Organism'] = self.organism_entry.get() # set main window active again self.child.grab_release() # close the child window self.child.after(0, self.child.destroy()) # update the model organism in the main window self.controller.access_subframes( self.controller.current_window).sub_frames.species_label.configure( text=self.controller.all_common_variables.experimental_metadata['Model Organism']) def metadata_fix_organism(self): # create a new window and name it self.child = tk.Toplevel() self.child.wm_title('Fix Organism') self.child.attributes("-topmost", True) # force on top # disable main window self.child.grab_set() # create a frame in the window self.child_frame = tk.Frame(self.child) self.organism_label = tk.Label(self.child_frame, text='Enter name of Organims') self.organism_label.grid(row=0, column=0) self.organism_entry = tk.Entry(self.child_frame, width=20) self.organism_entry.grid(row=1, column=0, columnspan=2) self.organism_entry.insert(tk.END, self.controller.all_common_variables.experimental_metadata['Model Organism']) self.accept_changes = tk.Button(self.child_frame, text='Accept Changes', command=self.metadata_accept_organism_changes) self.accept_changes.grid(row=2, column=0) self.discard_changes = tk.Button(self.child_frame, text='Discard Changes', command=self.metadata_discard_changes) self.discard_changes.grid(row=2, column=1) self.child_frame.pack() def high_power_LED(self): """ If the software is run on a configuration using the miniBuck the PWM signal needs to be inverted as the current controller is on if no control voltage is provided. 
        The standard configuration with the transistor is inverse, as
        the transistor only conducts current if the gate is opened
        using the control voltage
        """
        # create new window and name it
        self.controller.all_common_variables.child_high_power_LED = tk.Toplevel()
        self.controller.all_common_variables.child_high_power_LED.wm_title('High Power LED selection')
        self.controller.all_common_variables.child_high_power_LED.attributes("-topmost", True)  # force on top
        # disable main window
        self.controller.all_common_variables.child_high_power_LED.grab_set()

        # create a temporary variable that can be saved or thrown away
        # depending on what the user does
        temp_high_power_LED_bool = tk.IntVar()
        temp_high_power_LED_bool.set(self.controller.all_common_variables.high_power_LEDs_bool.get())

        # todo once name is finalized > change name here!
        explanation = tk.Label(self.controller.all_common_variables.child_high_power_LED,
                               justify=tk.LEFT,
                               text='PiVR comes in two different versions:\n'
                                    '1) the standard version with LED strips\n'
                                    '2) the high power LED version with high power LEDs\n'
                                    'Please select which version you have:')
        explanation.grid(row=0, column=0, columnspan=2, sticky='W')

        standard_radiobutton = tk.Radiobutton(self.controller.all_common_variables.child_high_power_LED,
                                              justify=tk.LEFT,
                                              text="Standard LED configuration (Strips)",
                                              padx=20,
                                              variable=temp_high_power_LED_bool,
                                              value=0)
        standard_radiobutton.grid(row=1, column=0, columnspan=2, sticky='W')

        high_power_radiobutton = tk.Radiobutton(self.controller.all_common_variables.child_high_power_LED,
                                                justify=tk.LEFT,
                                                text="High Powered LED configuration (Stars)",
                                                padx=20,
                                                variable=temp_high_power_LED_bool,
                                                value=1)
        high_power_radiobutton.grid(row=2, column=0, columnspan=2, sticky='W')

        quit_window_save = tk.Button(self.controller.all_common_variables.child_high_power_LED,
                                     text='Exit and save changes',
                                     command=lambda: self.exit_toplevel(popup_name='High Powered LED',
                                                                        save=True,
                                                                        savevalue=temp_high_power_LED_bool))
        quit_window_save.grid(row=3, column=0, sticky='W')

        quit_window_discard = tk.Button(self.controller.all_common_variables.child_high_power_LED,
                                        text='Exit and discard changes',
                                        command=lambda: self.exit_toplevel(popup_name='High Powered LED',
                                                                           save=False))
        quit_window_discard.grid(row=3, column=1, sticky='W')

    def exit_toplevel(self, popup_name, save=False, savevalue=None):
        """
        Function is called when the user presses the quit button on
        one of the toplevel (popup) windows
        """
        if popup_name == 'High Powered LED':
            if save:
                # only save if user presses the correct button
                self.controller.all_common_variables.high_power_LEDs_bool.set(savevalue.get())
                # let the user know that the program needs to be restarted
                tk.messagebox.showinfo('Notification',
                                       'For the changes to take effect\n'
                                       'please save and exit and restart\n'
                                       'the program')
            # set main window active again
            self.controller.all_common_variables.child_high_power_LED.grab_release()
            # close the child window
            self.controller.all_common_variables.child_high_power_LED.after(
                0, self.controller.all_common_variables.child_high_power_LED.destroy())

    def animal_detection_method_func(self):
        """
        User can choose between three animal detection/background
        reconstruction modes:

        Mode 1 is the standard mode. It waits until something moves
        and reconstructs the background from that image. Most of the
        time the background image of this mode will have a little bit
        of the animal left. But it seems to be robust.

        Mode 2, or pre-define background, is the mode where the user
        takes a picture of the arena without the animal and then adds
        the animal. The first image is the background for the whole
        experiment. This can be the optimal mode if the experiment has
        no lid that has to be opened to put the animal, such as a
        zebrafish experiment.

        Mode 3, or background reconstruction by stitching, attempts to
        give a close to perfect background reconstruction without user
        intervention. It waits for the animal to move, does local
        thresholding to identify the animal, then the algorithm waits
        until the animal is gone from the original position and then
        takes a picture of only the rectangle that was occupied by the
        animal before and 'stitches' it into the first image. This
        works well with slow moving animals such as Drosophila larvae
        on an evenly illuminated background without any high-contrast
        image features
        """
        # create a new window and name it
        self.child = tk.Toplevel()
        self.child.wm_title('Select Detection Method')
        self.child.attributes("-topmost", True)  # force on top
        # disable main window
        self.child.grab_set()
        # create a frame in the window
        self.child_frame = tk.Frame(self.child)

        explanation_animal_detection_method = tk.Label(
            self.child,
            justify=tk.LEFT,
            wraplength=500,
            text='Before the animal can be tracked it needs to be '
                 'identified. '
                 'In the current implementation three options are '
                 'available. ')
                 #'Please refer to the publication/webpage for more details.')
        explanation_animal_detection_method.grid(row=0, column=0, sticky='W')

        standard_method_button = tk.Radiobutton(
            self.child,
            justify=tk.LEFT,
            wraplength=500,
            text='Standard - Mode 1\n'
                 'This method will work for a wide array of animals and '
                 'experimental setups.\n'
                 'It detects the animal as soon as it moves. It then proceeds '
                 'with a lazy background reconstruction that will usually not '
                 'completely get rid of the animal in the background image\n',
            variable=self.controller.all_common_variables.animal_detection_method_var,
            value='Mode 1')
        standard_method_button.grid(row=1, column=0, sticky='W')

        pre_define_background_button = tk.Radiobutton(
            self.child,
            justify=tk.LEFT,
            wraplength=500,
            text='Pre-Define background - Mode 2\n'
                 'This method works well if the user is able to insert the animal '
                 'into the Field of View of the camera without changing '
                 'anything else in the image.\n'
                 'The user takes a background image without the animal. '
                 'Then the animal (and only the animal) is introduced.\n'
                 'This works well if no lid is used on the arena.\n',
            variable=self.controller.all_common_variables.animal_detection_method_var,
            value='Mode 2')
        pre_define_background_button.grid(row=2, column=0, sticky='W')

        background_reconstruction_by_stitching = tk.Radiobutton(
            self.child,
            justify=tk.LEFT,
            wraplength=500,
            text='Reconstruct background by stitching - Mode 3\n'
                 'Similar to Standard. It only works if the animal clearly '
                 'contrasts with the background.\n'
                 'This method detects the animal as soon as it moves. It then '
                 'identifies the animal using the parameters defined in the '
                 '"list_of_available_organisms.json" file. '
                 "It then waits for the animal to leave its original position "
                 "and then reconstructs the whole background image automatically\n"
                 'This method is recommended for slow animals/animals that do '
                 'not reach the edge of the arena during detection.\n',
            variable=self.controller.all_common_variables.animal_detection_method_var,
            value='Mode 3')
        background_reconstruction_by_stitching.grid(row=3, column=0, sticky='W')

    def camera_controls_func(self):
        """
        Popup that user can call to define optimal image quality.
Contains Cam ON/Cam OFF button, Scales for the different GPIO channels, button for autoexposure, and scales for brightness and contrast :return: """ # create a new window and name it child_window_optimize_image = tk.Toplevel() child_window_optimize_image.wm_title('Set up optimal image') child_window_optimize_image.attributes("-topmost", True) # force on top # disable main window child_window_optimize_image.grab_set() # create a Labeled Frame in the window for all the camera controls child_frame_cam = tk.LabelFrame(child_window_optimize_image, text='Camera Control') child_frame_cam.grid(row=0, column=0) # create a second Labeled Frame in the window for all the LED controls child_frame_led = tk.LabelFrame(child_window_optimize_image, text='LED Control') child_frame_led.grid(row=1, column=0) #if RASPBERRY or virtual_raspberry: self.cam_on_button = tk.Button(child_frame_cam, text='Cam On', fg='green', command=self.controller.all_common_functions.cam_on_func) self.cam_on_button.grid(row=0, column=0) self.cam_off_button = tk.Button(child_frame_cam, text='Cam Off', fg='red', command=self.controller.all_common_functions.cam_off_func) self.cam_off_button.grid(row=0, column=1) self.controller.all_common_variables.auto_exposure_button = tk.Button(child_frame_cam, text='autoexp. on', command=self.controller.all_common_functions.autoexp_callback) self.controller.all_common_variables.auto_exposure_button.grid(row=1, column=0) # turn autoexposure off only if it was turned off before. In fresh out-of-the box software will # be turned on if RASPBERRY: if previous_variables is not None and self.cam.shutter_speed != 0 : self.controller.all_common_variables.auto_exposure_button['text'] = 'autoexp. off' # to keep things tidy, class the resolution widgets together in one frame: the resolution_frame resolution_frame = tk.Frame(child_frame_cam) resolution_frame.grid(row=2, column=3, rowspan=2) resolution_label = tk.Label(resolution_frame, text='Camera Resolution') resolution_label.grid(row=0, column=0) self.cam_resolution_menu = tk.OptionMenu(resolution_frame, self.controller.all_common_variables.resolution_variable, *self.controller.all_common_variables.available_resolution) self.cam_resolution_menu.grid(row=1, column=0) self.preview_size_scale = tk.Scale(child_frame_cam, from_=180, to=1000, resolution=20, label='Video preview window size', variable=self.controller.all_common_variables.preview_window_size_variable, orient='horizontal', len=180) self.preview_size_scale.grid(row=2, column=0, columnspan=2, rowspan=2) # to keep things tidy, class the framerate widgets together into one frame: the recording_framerate_frame recording_framerate_frame = tk.Frame(child_frame_cam) recording_framerate_frame.grid(row=0, column=3, rowspan=2) # the recording framerate entry and update button - keep in here as this will change the image self.recording_frameratelabel = tk.Label(recording_framerate_frame, text='Recording framerate') self.recording_frameratelabel.grid(row=0, column=0) self.recording_framerate = tk.Entry(recording_framerate_frame, width=5, textvariable=self.controller.all_common_variables.framerate_entry_variable) self.recording_framerate.grid(row=0, column=1) self.update_framerate = tk.Button(recording_framerate_frame, text='Update Preview Framerate', command=lambda: self.controller.all_common_functions.update_framerate_func( new_framerate=int(self.controller.all_common_variables.framerate_entry_variable.get()) )) self.update_framerate.grid(row=1, column=0, columnspan=2) self.backlight_intensity_scale = 
tk.Scale(child_frame_led, from_=0, to=1000000, resolution=50000, label='Backlight Intensity', variable=self.controller.all_common_variables.backlight_intensity_variable, orient='horizontal', len=180) self.backlight_intensity_scale.grid(row=2, column=0) self.backlight_intensity_scale.set(self.controller.all_common_variables.backlight_intensity_variable.get()) self.backlight_two_intensity_scale = tk.Scale(child_frame_led, from_=0, to=1000000, resolution=50000, label='Backlight 2 Intensity', variable=self.controller.all_common_variables.backlight_two_intensity_variable, orient='horizontal', len=180) self.backlight_two_intensity_scale.grid(row=2, column=1) self.backlight_two_intensity_scale.set(self.controller.all_common_variables.backlight_two_intensity_variable.get()) if RASPBERRY: resolution_of_analog_output_scale = int(self.controller.all_common_variables.pwm_range / 20) range_of_analog_output_scale = self.controller.all_common_variables.pwm_range else: range_of_analog_output_scale = PWM_RANGE resolution_of_analog_output_scale = PWM_RANGE/20 # todo - put in ifs so that high speed PWM can be controlled as well!!!!!! self.analog_output_channel_one_scale = tk.Scale(child_frame_led, from_=0, to=range_of_analog_output_scale, resolution=resolution_of_analog_output_scale, label='Channel 1', variable=self.controller.all_common_variables.channel_one_variable, orient='horizontal', len=180) self.analog_output_channel_one_scale.grid(row=3, column=0) self.analog_output_channel_one_scale.set(self.controller.all_common_variables.channel_one_dutycycle) self.analog_output_channel_two_scale = tk.Scale(child_frame_led, from_=0, to=range_of_analog_output_scale, resolution=resolution_of_analog_output_scale, label='Channel 2', variable=self.controller.all_common_variables.channel_two_variable, orient='horizontal', len=180) self.analog_output_channel_two_scale.grid(row=3, column=1) self.analog_output_channel_two_scale.set(self.controller.all_common_variables.channel_two_dutycycle) self.analog_output_channel_three_scale = tk.Scale(child_frame_led, from_=0, to=range_of_analog_output_scale, resolution=resolution_of_analog_output_scale, label='Channel 3', variable=self.controller.all_common_variables.channel_three_variable, orient='horizontal', len=180) self.analog_output_channel_three_scale.grid(row=4, column=0) self.analog_output_channel_three_scale.set(self.controller.all_common_variables.channel_three_dutycycle) self.analog_output_channel_four_scale = tk.Scale(child_frame_led, from_=0, to=range_of_analog_output_scale, resolution=resolution_of_analog_output_scale, label='Channel 4', variable=self.controller.all_common_variables.channel_four_variable, orient='horizontal', len=180) self.analog_output_channel_four_scale.grid(row=4, column=1) self.analog_output_channel_four_scale.set(self.controller.all_common_variables.channel_four_dutycycle) width=400 height=350 xoffset=400 yoffset=0 child_window_optimize_image.geometry("%dx%d%+d%+d" % (width, height, xoffset, yoffset)) def turn_GPIO_fully_off(self, output_channel_one, output_channel_two, output_channel_three, output_channel_four): """ This function turns off all the Channels (*not* the background) by turning the dutycylce to 0 (or the PWM_RANGE ( should be 100) in the high power version). This is achieved by using a list comprehension and cycling through the GPIOs of each channel. """ # turn off GPIOs of Channel one - keep in loop instead of just # saying GPIO17 off for forward compatibility!! 
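        # A minimal comment-only sketch of the underlying pigpio calls,
        # assuming the pwm_object below is a pigpio.pi() connection
        # (GPIO 17 is only an illustrative pin number):
        #
        #     import pigpio
        #     pi = pigpio.pi()
        #     pi.set_PWM_range(17, PWM_RANGE)       # dutycycle values 0..PWM_RANGE
        #     pi.set_PWM_dutycycle(17, 0)           # LED strip version: 0 = off
        #     pi.set_PWM_dutycycle(17, PWM_RANGE)   # high power version: max = off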
# list comprehension of Channel 1 if self.controller.all_common_variables.high_power_LEDs_bool.get(): off_value = self.controller.all_common_variables.pwm_range else: off_value = 0 [self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=output_channel_one[i_stim][0], dutycycle=off_value) for i_stim in range(len(output_channel_one))] # list comprehension of Channel 2 [self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=output_channel_two[i_stim][0], dutycycle=off_value) for i_stim in range(len(output_channel_two))] # list comprehension of Channel 3 [self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=output_channel_three[i_stim][0], dutycycle=off_value) for i_stim in range(len(output_channel_three))] # list comprehension of Channel 4 [self.controller.all_common_variables.pwm_object.set_PWM_dutycycle( user_gpio=output_channel_four[i_stim][0], dutycycle=off_value) for i_stim in range(len(output_channel_four))] def vr_body_part_stim_func(self): """ This function creates a tkinter window which allows the user to choose which point of the animal to use for virtual reality reference: Head, Midpoint, Centroid or Tail. """ child_window_vr_body_stim = tk.Toplevel() child_window_vr_body_stim.wm_title('Select body part') child_window_vr_body_stim.attributes("-topmost", True) # force on top # disable the main window child_window_vr_body_stim.grab_set() explanation_vr_body_stim = tk.Label( child_window_vr_body_stim, justify=tk.LEFT, wraplength=300, text='When presenting the animal with a virtual reality, ' 'the sensory organs that are being probed can be ' 'located at different parts of the body. PiVR allows* ' 'the presentation of the virtual reality depending on ' 'the position of the following body parts:' ) explanation_vr_body_stim.grid(row=0, column=0, columnspan=2) vr_stim_head_button = tk.Radiobutton( child_window_vr_body_stim, justify=tk.LEFT, text='Head', variable=self.controller.all_common_variables.vr_stim_loc_var, value='Head' ) vr_stim_head_button.grid(row=1, column=0, sticky='W') vr_stim_centroid_button = tk.Radiobutton( child_window_vr_body_stim, justify=tk.LEFT, text='Centroid', variable=self.controller.all_common_variables.vr_stim_loc_var, value='Centroid' ) vr_stim_centroid_button.grid(row=2, column=0, sticky='W') vr_stim_midpoint_button = tk.Radiobutton( child_window_vr_body_stim, justify=tk.LEFT, text='Midpoint (seems a bit unstable)', variable=self.controller.all_common_variables.vr_stim_loc_var, value='Midpoint' ) vr_stim_midpoint_button.grid(row=3, column=0, sticky='W') vr_stim_tail_button = tk.Radiobutton( child_window_vr_body_stim, justify=tk.LEFT, text='Tail', variable=self.controller.all_common_variables.vr_stim_loc_var, value='Tail' ) vr_stim_tail_button.grid(row=4, column=0, sticky='W') disclaimer = tk.Label( child_window_vr_body_stim, justify=tk.LEFT, wraplength=300, text='*Please note that the head/tail classification ' 'algorithm is not perfect! You must ensure that the ' 'algorithm works reasonably well for your experiment.' ) disclaimer.grid(row=5, column=0, columnspan=2) def grab_undistort_files(self, resolution = None): """ This function is called just before an experiment. It loads the mtx and dst files used for undistort into memory. It also prepares the newcameramtx needed to perform undistort. The user can provide their own files. Different files are used for different resolutions. 
This function makes sure the correct files are loaded. """ #self.controller.all_common_variables.undistort_path # path to undistort files, e.g. user defined files #self.controller.all_common_variables.resolution # resolution as a string, e.g. '640x480' # Extra safety call: If for some reason the bool is set to True # do another check here if not CV2_INSTALLED: self.controller.all_common_variables.online_undistort_bool.set(0) print('cv2 not installed, cannot perform undistort.') if self.controller.all_common_variables.online_undistort_bool.get(): if resolution == None: dst_path = Path(self.controller.all_common_variables.undistort_path, self.controller.all_common_variables.resolution + \ '_dist.npy') mtx_path = Path(self.controller.all_common_variables.undistort_path, self.controller.all_common_variables.resolution + \ '_mtx.npy') # Also prepare the cameramatrix so that this doesn't have # to be done afterwards # extract width and height from resolution variable width = int(self.controller.all_common_variables.resolution.split('x')[0]) height = int(self.controller.all_common_variables.resolution.split('x')[-1]) else: dst_path = Path(self.controller.all_common_variables.undistort_path, resolution + '_dist.npy') mtx_path = Path(self.controller.all_common_variables.undistort_path, resolution + '_mtx.npy') width = int(resolution.split('x')[0]) height = int(resolution.split('x')[-1]) self.controller.all_common_variables.undistort_dst_file = np.load(dst_path) self.controller.all_common_variables.undistort_mtx_file = np.load(mtx_path) #print("width" + repr(width)) #print("height"+ repr(height)) self.controller.all_common_variables.newcameramtx, roi = \ cv2.getOptimalNewCameraMatrix( self.controller.all_common_variables.undistort_mtx_file, self.controller.all_common_variables.undistort_dst_file, (width, height), 1, (width, height)) else: # if user elects to NOT do undistort make sure the dst and mtx # variables are None self.controller.all_common_variables.undistort_dst_file = None self.controller.all_common_variables.undistort_mtx_file = None self.controller.all_common_variables.newcameramtx = None def undistort_new_lens_menu(self): """ This function creates a tkinter window with instructions on how to create new mtx/dist files for undistort if the user wants to use a different lens/camera or resolution. """ child_undistort_new_lens = tk.Toplevel() child_undistort_new_lens.wm_title('Undistort, new lens') child_undistort_new_lens.attributes("-topmost", True) # force on top child_undistort_new_lens.grab_set() # disable main window wraplength = 350 explanation_undistort_new_lens = tk.Label( child_undistort_new_lens, justify=tk.LEFT, wraplength=wraplength + 10, text='PiVR comes with files to undistort a variety of ' 'resolutions for the standard lens.\n\n' 'If you use a different resolution or a different lens ' 'you may use these instructions to create files for ' 'your specific setup.\n\n' 'Note: For detailed instructions please visit pivr.org') explanation_undistort_new_lens.grid(row=0, column=0) instructions_labelframe = tk.LabelFrame( child_undistort_new_lens, text='Instructions') instructions_labelframe.grid(row=1, column=0) if RASPBERRY or virtual_raspberry: # On the Raspberry monitor there's not a lot of space # therefore I only display the part that is necessary # without too many explanations step_3_select_folder_label = tk.Label( instructions_labelframe, justify=tk.LEFT, wraplength=wraplength, text='Select the folder with the images of the chessboard ' 'at different angles. 
Once selected, the algorithm to ' 'extract the calibration files will run automatically. ' 'Make sure you have at least 10 jpg images in the ' 'folder you are selecting.') step_3_select_folder_label.grid(row=2, column=0) step_3_select_folder_button = tk.Button( instructions_labelframe, text='Chessboard images', command=self.controller.all_common_functions.create_mtx_and_dist) step_3_select_folder_button.grid(row=3, column=0) else: explanation_undistort_new_lens = tk.Label( child_undistort_new_lens, justify=tk.LEFT, wraplength=wraplength + 10, text='PiVR comes with files to undistort a variety of ' 'resolutions for the standard lens.\n\n' 'If you use a different resolution or a different lens ' 'you may use these instructions to create files for ' 'your specific setup.\n\n' 'Note: For detailed instructions please visit pivr.org') explanation_undistort_new_lens.grid(row=0,column=0) instructions_labelframe = tk.LabelFrame( child_undistort_new_lens, text='Instructions') instructions_labelframe.grid(row=1, column=0) step_1_print = tk.Label( instructions_labelframe, #anchor='w', justify=tk.LEFT, #width=wraplength, wraplength=wraplength, text='Step 1\n\n' 'Print the Chessboard.jpg. The file can be found in ' 'the following folder: "PiVR/undistort_matrices".\n') step_1_print.grid(row=0, column=0) step_2_instructions = tk.Label( instructions_labelframe, justify=tk.LEFT, wraplength=wraplength, text='Step 2\n\n' 'On your PiVR setup, use the "Timelapse Recording" option. ' 'Set the image format to "jpg". You will need around 15 ' 'pictures of the printed chessboard from different angles. ' 'Copy these images from your PiVR to this computer.\n' ) step_2_instructions.grid(row=1,column=0) step_3_select_folder_label = tk.Label( instructions_labelframe, justify=tk.LEFT, wraplength=wraplength, text='Step 3\n\n' 'Select the folder with the images of the chessboard ' 'at different angles. Once selected, the algorithm to ' 'extract the calibration files will run automatically.') step_3_select_folder_label.grid(row=2, column=0) step_3_select_folder_button = tk.Button( instructions_labelframe, text='Chessboard images', command=self.controller.all_common_functions.create_mtx_and_dist) step_3_select_folder_button.grid(row=3, column=0) step_3_whitespace = tk.Label( instructions_labelframe, text='') step_3_whitespace.grid(row=4, column=0) step_4_copy_files_label = tk.Label( instructions_labelframe, justify=tk.LEFT, wraplength=wraplength, text='Step 4\n\n' 'If you are only doing post hoc analysis skip this step.\n' 'Copy the new files in "PiVR/undistort_matrices/user_provided" ' 'created on this computer to the same folder on your ' 'PiVR setup.\n') step_4_copy_files_label.grid(row=5, column=0) step_5_select_correct_undistort = tk.Label( instructions_labelframe, justify=tk.LEFT, wraplength=wraplength, text='Step 5\n\n' 'In the PiVR GUI go to "Options"->"Undistort Options". ' 'Confirm that "Perform Undistort?" is selected. \n' 'In "Undistort Files", select "Use your own undistort ' 'files" and save the settings.\n' 'From now on PiVR will use the undistort files specific' ' to your setup.') step_5_select_correct_undistort.grid(row=6, column=0) def create_mtx_and_dist(self): """ This function asks the user to select a folder with images of the chessboard at different angles. It then uses the cv2 functionality to create the mtx and dist numpy file needed for the undistort functionality. 
This function could be transferred to another file if this file gets too bloated. """ # Note - this should only be accessible if CV2 is installed # as the menu is off if not. path = Path(filedialog.askdirectory()) try: # This fits the provided chessboard image chessboard_rows = 6 chessboard_columns = 9 chessboard_size = (chessboard_columns, chessboard_rows) max_number_of_images = 15 # termination criteria criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((chessboard_rows * chessboard_columns, 3), np.float32) objp[:, :2] = np.mgrid[0:chessboard_columns, 0:chessboard_rows].T.reshape(-1, 2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d point in real world space imgpoints = [] # 2d points in image plane. #images = glob.glob('*.jpg') image_width = None image_height = None # it is unnecessary to use more than the max_number_of_images - subsample #if len(images) > max_number_of_images: # temp = [] # for i in range(max_number_of_images): # temp.append(images[int(i * len(images) / max_number_of_images)]) # images = temp good_images = 0 for counter, current_file in enumerate(path.iterdir()): if 'jpg' in current_file.name and counter <= max_number_of_images: img = cv2.imread(str(current_file)) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if image_height is None: image_height = gray.shape[1] image_width = gray.shape[0] # Find the chess board corners ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None) # If found, add object points, image points (after refining them) if ret: good_images += 1 objpoints.append(objp) corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria) imgpoints.append(corners) # Draw and display the corners cv2.drawChessboardCorners(img, (7, 6), corners2, ret) cv2.imshow('img', img) cv2.waitKey(500) cv2.destroyAllWindows() if good_images > 5: ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None) resolution_as_string = repr(image_height) + 'x' + repr(image_width) path_to_save = Path(SOFTWARE_PATH, 'undistort_matrices','user_provided') # Make sure the folder exists before trying to save there. Path(path_to_save).mkdir(parents=True, exist_ok=True) np.save(Path(path_to_save, resolution_as_string + '_dist.npy'), dist) np.save(Path(path_to_save, resolution_as_string + '_mtx.npy'), mtx) print('Used ' + repr(good_images) + ' images for calibration.') print('Calibration successful.') else: tk.messagebox.showerror('Not enough good images', 'The provided folder does not contain enough\n' 'good images for calibration.\n' 'Please provide at least 5 good images of the\n' 'checkerboard at different angles.') print('No jpg images in folder') except Exception as e: print("Unable to perform calibration. ") print(e)
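# --- Illustrative sketch (not part of PiVR) ----------------------------------
# The calibration files written by create_mtx_and_dist() above can later be
# applied to a single image with OpenCV. This is a minimal, hedged example of
# that workflow; the '<width>x<height>_mtx.npy' / '<width>x<height>_dist.npy'
# file names follow the convention used by grab_undistort_files(), and
# 'example_frame.jpg' is a hypothetical input image.
def _example_undistort_single_frame(folder, resolution='640x480'):
    import numpy as np
    import cv2
    from pathlib import Path
    mtx = np.load(Path(folder, resolution + '_mtx.npy'))
    dist = np.load(Path(folder, resolution + '_dist.npy'))
    img = cv2.imread(str(Path(folder, 'example_frame.jpg')))
    height, width = img.shape[:2]
    # same OpenCV call as in grab_undistort_files() above
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
        mtx, dist, (width, height), 1, (width, height))
    return cv2.undistort(img, mtx, dist, None, newcameramtx)
# ------------------------------------------------------------------------------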
[docs] class TrackingFrame(tk.Frame): """ The constructor class used to create the “Tracking” frame is different from the "Dynamic Virtual Reality" frame. """ def __init__(self, parent, controller, camera_class=None): # discussion about different ways to create the class # https://stackoverflow.com/questions/7300072/inheriting-from-frame-or-not-in-a-tkinter-application and # https://stackoverflow.com/questions/17466561/best-way-to-structure-a-tkinter-application/17470842#17470842 tk.Frame.__init__(self, parent) # reference the camera and the controller self.camera_class = camera_class self.cam = self.camera_class.cam self.controller = controller # Here the frame that will hold all the camera controls is being prepared. # self.subframe_preexperiment = tk.LabelFrame(self, text='Tracking') # self.subframe_preexperiment.grid(row=1, column=0) # construct the camera frame by calling the function 'camera_frame' in the class SubFrames which is referenced # by the controller as 'sub_frames' # self.controller.sub_frames.camera_frame(self.subframe_preexperiment) # self.controller.sub_frames.misc_frame(self.subframe_preexperiment, # observation_mode = True, distance_configuration=True, # model_organism = True) self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=True, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=True, start_text='Start Tracking', experiment_name='Online Tracking', frame=self, quit_frame=False, time_dependent_stim_Frame=True) # After the construction of the frame it needs to watch out for user input. # not sure if this is needed! Maybe we only need to call this once as tall the variables are anyway in # all_common_variables self.controller.all_common_functions.menu_callback_shared(cam=True, backlight=True, analog_output_one=True, ) # todo - continue here by adding the ifs of the callback function # This function defines what should happen when the user presses on the 'Start Tracking' Button
[docs] def start_experiment_function(self): """ Each constructor class which is used to start an experiment has this function. They all have the same name. This is the one in the "TrackingFrame" It checks for: #) correct camera resolution, #) if the user has specified the pixel per mm If one of these tests fails, the user gets an error message and the experiment will not start. If the experiment does start, the :py:class:`control_file.ControlTracking` is called that then handles the detection and tracking of the animal. """ # Make sure the user defined the pixel/mm - otherwise the experiment can not start! if self.controller.all_common_variables.pixel_per_mm_var.get() != 0: # boxsize is the area in all four directions starting # from the bounding box of the animal that will be # used in the next frame to look for the animal. It # depends on the speed of the animal and the framerate # boxsize is calculated for each experiment: # max_speed_of_animal_in_mm_per_sec / framerate (to # get speed in mm/frames) then multiplied by pixel # per mm to get speed in pixel/frame, the actualy # parameter that is needed for the program to function boxsize = (organisms_and_heuristics[ self.controller.all_common_variables.model_organism_variable.get()][ 'max_speed_animal_mm_per_s'] / int(self.controller.all_common_variables.framerate_entry_variable.get()) * self.controller.all_common_variables.pixel_per_mm_var.get()) # since the boxsize now also depends on the max speed # of the animal and the framerate it can become # extremely small (i.e. a larva that moves 2mm/s at # 30frames will only move 0.066mm which at 5px/mm is # only 0.33pixel # If in any frame the animal is not completely # detected the algorithm could never recover. This is # why the length of the animal must be taken into # account as well! 
boxsize = boxsize \ + (organisms_and_heuristics[ self.controller.all_common_variables.model_organism_variable.get()][ 'max_skeleton_length_mm'] * self.controller.all_common_variables.pixel_per_mm_var.get() ) print('boxsize is ' + repr(boxsize)) # Grab the relevant undistort files self.controller.all_common_functions.grab_undistort_files() control_file.ControlTracking( boxsize=boxsize, signal=self.controller.all_common_variables.signal, cam=self.controller.all_common_variables.cam, base_path=self.controller.all_common_variables.path_entry_variable.get(), genotype= self.controller.all_common_variables.genotype_entry_variable.get(), recording_framerate=int(self.controller.all_common_variables.framerate_entry_variable.get()), resolution=self.controller.all_common_variables.resolution, recordingtime=int(self.controller.all_common_variables.recording_time_variable.get()), # preview=self.controller.all_common_variables.experiment_observation_mode_output, # preview_resize=self.controller.all_common_variables.observation_resize_variable.get(), pixel_per_mm=self.controller.all_common_variables.pixel_per_mm_var.get(), model_organism=self.controller.all_common_variables.model_organism_value, pwm_object=self.controller.all_common_variables.pwm_object, high_power_led_bool=self.controller.all_common_variables.high_power_LEDs_bool.get(), # put somewhere else, confusing, same for other line time_dependent_stim_file=self.controller.all_common_variables.time_dependent_stimulation_file, time_dependent_stim_file_name=self.controller.all_common_variables.time_dependent_stim_file_name, organisms_and_heuristics=organisms_and_heuristics, debug_mode=self.controller.all_common_variables.debug_mode_var.get(), animal_detection_mode=self.controller.all_common_variables.animal_detection_method_var.get(), output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, output_channel_four=self.controller.all_common_variables.channel_four, controller = self.controller, background_channel=self.controller.all_common_variables.background, background_2_channel=self.controller.all_common_variables.background_two, background_dutycycle=self.controller.all_common_variables.backlight_intensity_value, background_2_dutycycle=self.controller.all_common_variables.backlight_two_intensity_value, version_info=VERSION_INFO, save_centroids_npy=self.controller.all_common_variables.save_centroids_npy.get(), save_heads_npy=self.controller.all_common_variables.save_heads_npy.get(), save_tails_npy=self.controller.all_common_variables.save_tails_npy.get(), save_midpoints_npy=self.controller.all_common_variables.save_midpoints_npy.get(), save_bbox_npy=self.controller.all_common_variables.save_bbox_npy.get(), save_stim_npy=self.controller.all_common_variables.save_stim_npy.get(), save_thresh_npy=self.controller.all_common_variables.save_thresh_npy.get(), save_skeleton_npy=self.controller.all_common_variables.save_skel_npy.get(), undistort_dst=self.controller.all_common_variables.undistort_dst_file, undistort_mtx=self.controller.all_common_variables.undistort_mtx_file, newcameramtx=self.controller.all_common_variables.newcameramtx ) else: tk.messagebox.showerror('Define Pixel/mm','You have to define pixel per mm before starting\n' 'an experiment!\n' 'Please go to "options">"Define Pixel/mm" to do so!')
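# --- Worked example (illustrative only) ---------------------------------------
# The boxsize heuristic in start_experiment_function() above adds the per-frame
# displacement of the animal to its body length, both converted to pixels. With
# the values assumed in the comments above (2 mm/s maximum speed, 30 fps,
# 5 px/mm, 5 mm maximum skeleton length):
#   displacement per frame = 2 mm/s / 30 fps * 5 px/mm = 0.33 px
#   body length            = 5 mm * 5 px/mm            = 25 px
#   boxsize                = 0.33 px + 25 px           = ~25.3 px
# so the search window in the next frame extends roughly 25 px in every
# direction from the current bounding box.
# -------------------------------------------------------------------------------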
class VirtualRealityFrame(tk.Frame): ''' This class constructs the window that lets the user display a virtual reality while tracking. ''' def __init__(self, parent, controller, camera_class=None): # discussion about different ways to create the class # https://stackoverflow.com/questions/7300072/inheriting- # from-frame-or-not-in-a-tkinter-application and # https://stackoverflow.com/questions/17466561/best-way- # to-structure-a-tkinter-application/17470842#17470842 tk.Frame.__init__(self, parent) # reference the camera and the controller self.camera_class = camera_class self.cam = self.camera_class.cam self.controller = controller # Here the frame that will hold all the camera controls is # being prepared. self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=True, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=True, start_text='Start Tracking VR', experiment_name='Closed Loop stimulation', VR_arena=True, frame=self, quit_frame=False) # After the construction of the frame it needs to watch out for user input. self.controller.all_common_functions.menu_callback_shared( cam=True, backlight=True, analog_output_one=True, ) # todo - continue here by adding the ifs of the callback # function # This function defines what should happen when the user # presses on the 'Start Tracking' Button def start_experiment_function(self): experiment_is_go = True # Make sure the VR_arena resolution fits with the desired # recording solution desired_resolution = [\ int(self.controller.all_common_variables.resolution.split('x')[0]), int(self.controller.all_common_variables.resolution.split('x')[1]) ] arena_resolution = [\ self.controller.all_common_variables.vr_arena.shape[1], self.controller.all_common_variables.vr_arena.shape[0] ] print(arena_resolution) if desired_resolution != arena_resolution: tk.messagebox.showerror( 'Resolution Error', 'You selected the following recording resolution:' + repr(desired_resolution) + '.\n' + 'You selected a VR arena with the following ' 'resolution:' + repr(arena_resolution) + '.\n' + 'You must select a VR arena with the same resolution ' 'as the recording resolution.') experiment_is_go = False # also make sure the user defined the pixel/mm - otherwise # the experiment can not start! if self.controller.all_common_variables.pixel_per_mm_var.get() == 0: tk.messagebox.showerror( 'Define Pixel/mm', 'You have to define pixel per mm before starting\n' 'an experiment!\n' 'Please go to "options">"Define Pixel/mm" to do so!') experiment_is_go = False # make sure the intensity scaling is done between 0 and 100% # percent! if not 0 \ <= float(self.controller.all_common_variables.vr_intensity_adjustment_variable.get()) \ <= 100: tk.messagebox.showerror( 'Adjust power in percent', 'Power is adjusted in percent. The value has to be ' 'between 0 and 100. You have entered ' + repr( float(self.controller.all_common_variables.vr_intensity_adjustment_variable.get()))) experiment_is_go = False # if arena is chosen where the animal angle is taken into # account the animal detection mode must be mode 3! 
# It wouldn't make sense otherwise as the software can not # know in which direction the animal is heading if self.controller.all_common_variables.placed_animal is not None: if len(self.controller.all_common_variables.placed_animal) == 3 \ and self.controller.all_common_variables.animal_detection_method_var.get() != 'Mode 3': tk.messagebox.showerror( 'Can not define orientation', 'You have selected an arena which is supposed to ' 'be positioned relative to the orientation of the ' 'animal.\n' 'This is only possible with Animal Detection Mode 3!\n' 'Either choose an arena without alignment of arena ' 'to orientation of the animal or select Mode 3 in ' '"options">"Animal Detection Method"' ) experiment_is_go = False # if user selects a time varying arena and wants to adjust the # arena intensity, throw an error as this is not implemented! if self.controller.all_common_variables.vr_arena_multidimensional \ and float(self.controller.all_common_variables.vr_intensity_adjustment_variable.get()) != 100.0: tk.messagebox.showwarning( 'Time varying arena can not be scaled', 'You have selected an arena that varies in time and ' 'you try to scale it by ' + repr(float(self.controller.all_common_variables.vr_intensity_adjustment_variable.get())) + '. The scaling of time varying arenas is currently ' 'not implemented. To start the experiment enter "100" ' 'in "Adjust Power [%]"' ) # Here the arena is translated into the proper number range. # Implicit assumptions at this point: # 1) Multidimensional arenas are in uint8, the number space goes from 0 to 255 # 2) static virtual arenas are in uint16, the number space goes from 0 to 65535 # # The reasons: # obviously uint16 allows for much finer-grained gradients than # uint8. A static, and therefore 2-dimensional # uint16 array is negligibly small, considering RAM. # A 3D, time-varying array is much larger. On RAM, uint8 is # about half the size of uint16. I chose, at this point, # to just make the time-varying arrays uint8 to have more # timepoints I can use. This can easily be # changed in the future, of course if self.controller.all_common_variables.vr_arena_multidimensional: try: #vr_arena = self.controller.all_common_variables.vr_arena * \ # (self.controller.all_common_variables.pwm_range / 255) if RASPBERRY: # first, make an explicit copy - very memory inefficient. Strongly constrains how large the # dynamic arena can be - in the future improve this! But make sure not to introduce a bug that will # only appear after running the arena a couple of times! vr_arena = self.controller.all_common_variables.vr_arena.copy() except MemoryError: tk.messagebox.showerror( 'Memory Error', 'The arena you attempted to run the experiment ' 'with is too large for the ' 'RaspberryPi to handle and it ran out of RAM.\n' 'Tips to reduce size of your time-varying ' 'virtual arena:\n' '1.) Make sure you are using uint8 number space\n' '2.) Reduce the number of timepoints') experiment_is_go = False else: if RASPBERRY: # first, make an explicit copy - memory is not a # problem when not working with dynamical arrays! # this is explicit and can't really break as the # original vr_arena is never touched, just a copy of it vr_arena = self.controller.all_common_variables.vr_arena.copy() # Now calculate the actual pwm range #vr_arena *= (self.controller.all_common_variables.pwm_range / 65535) if experiment_is_go: # boxsize is the area in all four directions starting # from the bounding box of the animal that will be used # in the next frame to look for the animal. 
It depends on # the speed of the animal and the framerate # boxsize is calculated for each experiment: # max_speed_of_animal_in_mm_per_sec / framerate (to get # speed in mm/frames) then multiplied by pixel per mm to # get speed in pixel/frame, the actualy parameter that is # needed for the program to function boxsize = (organisms_and_heuristics[ self.controller.all_common_variables.model_organism_variable.get()][ 'max_speed_animal_mm_per_s'] / int( self.controller.all_common_variables.framerate_entry_variable.get()) * self.controller.all_common_variables.pixel_per_mm_var.get()) # since the boxsize now also depends on the max speed of # the animal and the framerate it can become extremely # small (i.e. a larva that moves 2mm/s at 30frames will # only move 0.066mm which at 5px/mm is only 0.33pixel. If # in any frame the animal is not completely detected the # algorithm could never recover. This is why the length # of the animal must be taken into account as well! boxsize = boxsize + (organisms_and_heuristics[ self.controller.all_common_variables.model_organism_variable.get()][ 'max_skeleton_length_mm'] * self.controller.all_common_variables.pixel_per_mm_var.get()) print('boxsize is ' + repr(boxsize)) #print('starting experiment from the vr class in the GUI') #print(np.amax(vr_arena)) # Grab the relevant undistort files self.controller.all_common_functions.grab_undistort_files() vr_arena_name = self.controller.all_common_variables.vr_arena_name.split('/')[-1] control_file.ControlTracking( boxsize=boxsize, signal=self.controller.all_common_variables.signal, cam=self.controller.all_common_variables.cam, base_path=self.controller.all_common_variables.path_entry_variable.get(), genotype= self.controller.all_common_variables.genotype_entry_variable.get(), recording_framerate=int(self.controller.all_common_variables.framerate_entry_variable.get()), resolution=self.controller.all_common_variables.resolution, recordingtime=int(self.controller.all_common_variables.recording_time_variable.get()), #preview=self.controller.all_common_variables.experiment_observation_mode_output, #preview_resize=self.controller.all_common_variables.preview_resize, pixel_per_mm=self.controller.all_common_variables.pixel_per_mm_var.get(), model_organism=self.controller.all_common_variables.model_organism_variable.get(), vr_arena=vr_arena, pwm_object=self.controller.all_common_variables.pwm_object, placed_animal=self.controller.all_common_variables.placed_animal, vr_arena_name=vr_arena_name, high_power_led_bool=self.controller.all_common_variables.high_power_LEDs_bool.get(), organisms_and_heuristics=organisms_and_heuristics, debug_mode=self.controller.all_common_variables.debug_mode_var.get(), animal_detection_mode=self.controller.all_common_variables.animal_detection_method_var.get(), output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, output_channel_four=self.controller.all_common_variables.channel_four, overlay_bool=self.controller.all_common_variables.overlay_bool, controller=self.controller, background_channel = self.controller.all_common_variables.background, background_2_channel = self.controller.all_common_variables.background_two, background_dutycycle=self.controller.all_common_variables.backlight_intensity_value, background_2_dutycycle=self.controller.all_common_variables.backlight_two_intensity_value, 
#vr_update_rate=self.controller.all_common_variables.vr_update_rate, pwm_range=self.controller.all_common_variables.pwm_range, adjust_intensity=float(self.controller.all_common_variables.vr_intensity_adjustment_variable.get()), vr_stim_location=self.controller.all_common_variables.vr_stim_loc_var.get(), version_info=VERSION_INFO, save_centroids_npy=self.controller.all_common_variables.save_centroids_npy.get(), save_heads_npy=self.controller.all_common_variables.save_heads_npy.get(), save_tails_npy=self.controller.all_common_variables.save_tails_npy.get(), save_midpoints_npy=self.controller.all_common_variables.save_midpoints_npy.get(), save_bbox_npy=self.controller.all_common_variables.save_bbox_npy.get(), save_stim_npy=self.controller.all_common_variables.save_stim_npy.get(), save_thresh_npy=self.controller.all_common_variables.save_thresh_npy.get(), save_skeleton_npy=self.controller.all_common_variables.save_skel_npy.get(), undistort_dst=self.controller.all_common_variables.undistort_dst_file, undistort_mtx=self.controller.all_common_variables.undistort_mtx_file, newcameramtx=self.controller.all_common_variables.newcameramtx, )
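# --- Worked example (illustrative only) ---------------------------------------
# The number-space conventions described in start_experiment_function() above
# map arena pixel values onto the PWM duty-cycle range. Assuming a pwm_range of
# 100 (the default PWM_RANGE), the scaling shown in the commented-out lines
# would behave as follows:
#   static arena, uint16:  65535 * (100 / 65535) = 100 % duty cycle
#                          32768 * (100 / 65535) = ~50 % duty cycle
#   dynamic arena, uint8:    255 * (100 / 255)   = 100 % duty cycle
#                            128 * (100 / 255)   = ~50 % duty cycle
# -------------------------------------------------------------------------------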
[docs] class DynamicVirtualRealityFrame(tk.Frame): """ This constructor class is used to create the "Dynamic VR Arena" frame. """ def __init__(self, parent, controller, camera_class=None): # discussion about different ways to create the class # https://stackoverflow.com/questions/7300072/inheriting-from-frame-or-not-in-a-tkinter-application and # https://stackoverflow.com/questions/17466561/best-way-to-structure-a-tkinter-application/17470842#17470842 tk.Frame.__init__(self, parent) # reference the camera and the controller self.camera_class = camera_class self.cam = self.camera_class.cam self.controller = controller # Here the frame that will hold all the camera controls is # being prepared. self.sub_frames = SubFrames( self.camera_class, self.controller, cam_frame=True, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=True, start_text='Start Tracking, dynamic VR', experiment_name='Dynamic VR', VR_arena=True, frame=self, quit_frame=False, dynamic_VR_arena=True) # After the construction of the frame it needs to watch out for user input. self.controller.all_common_functions.menu_callback_shared( cam=True, backlight=True, analog_output_one=True, ) # todo - continue here by adding the ifs of the callback function
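# --- Worked example (illustrative only) ---------------------------------------
# The MemoryError guard in start_experiment_function() below exists because
# vr_arena.copy() temporarily doubles the arena's RAM footprint on the
# Raspberry Pi. For an assumed 640x480 uint8 dynamic arena (1 byte per pixel
# per timepoint):
#   640 * 480 * 1 byte  = ~0.3 MB per timepoint
#   1000 timepoints     = ~300 MB, and ~600 MB while the copy is being made
# Using uint8 instead of uint16 and reducing the number of timepoints, as the
# error message suggests, are the two direct ways to shrink this footprint.
# -------------------------------------------------------------------------------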
[docs] def start_experiment_function(self): """ Each constructor class which is used to start an experiment has this function. They all have the same name. This is the one in the "DynamicVirtualRealityFrame" It checks for: #) correct camera resolution, #) if the user has specified the pixel per mm #) if a copy of the dynamic virtual reality fits into RAM memory. If one of these tests fails, the user gets an error message and the experiment will not start. If the experiment does start, the :py:class:`control_file.ControlTracking` is called that then handles the detection and tracking of the animal. """ experiment_is_go = True # currently ony resolution of 640x480 is allowed. Might be a # new feature in the future if self.controller.all_common_variables.resolution != '640x480': tk.messagebox.showerror( 'Invalid resolution', 'It is only possible to do a tracking experiment' '\nusing the 640x480 resolution!' '\nPlease change resolution to start experiment!') experiment_is_go = False # also make sure the user defined the pixel/mm - otherwise # the experiment can not start! if self.controller.all_common_variables.pixel_per_mm_var.get() == 0: tk.messagebox.showerror( 'Define Pixel/mm', 'You have to define pixel per mm before starting' '\nan experiment!' '\nPlease go to "options">"Define Pixel/mm" to do so!') experiment_is_go = False try: if RASPBERRY: # first, make an explicit copy - very memory # inefficient. Strongly constrains how large the # dynamic arena can be - in the future improve this! # But make sure not to introduce a bug that will # only appear after running the arena a couple # of times! vr_arena = self.controller.all_common_variables.vr_arena.copy() except MemoryError: tk.messagebox.showerror( 'Memory Error', 'The arena you attempted to run the experiment with ' 'is too large for the RaspberryPi to handle and ' 'it ran out of RAM.' '\nTips to reduce size of your time-varying virtual arena:' '\n1.) Make sure you are using uint8 number space' '\n2.) Reduce the number of timepoints') experiment_is_go = False if experiment_is_go: # boxsize is the area in all four directions starting # from the bounding box of the animal that will be used # in the next frame to look for the animal. It depends on # the speed of the animal and the framerate. # boxsize is calculated for each experiment: # max_speed_of_animal_in_mm_per_sec / framerate (to get # speed in mm/frames) then multiplied by pixel per mm to # get speed in pixel/frame, the actualy parameter that is # needed for the program to function. boxsize = ( organisms_and_heuristics[ self.controller.all_common_variables.model_organism_variable.get()][ 'max_speed_animal_mm_per_s'] / int( self.controller.all_common_variables.framerate_entry_variable.get()) * self.controller.all_common_variables.pixel_per_mm_var.get()) # since the boxsize now also depends on the max speed of # the animal and the framerate it can become extremely # small (i.e. a larva that moves 2mm/s at 30frames will # only move 0.066mm which at 5px/mm is only 0.33pixel # If in any frame the animal is not completely detected # the algorithm could never recover. This is why the # length of the animal must be taken into account as well! 
boxsize = boxsize \ + (organisms_and_heuristics[ self.controller.all_common_variables.model_organism_variable.get()][ 'max_skeleton_length_mm'] \ * self.controller.all_common_variables.pixel_per_mm_var.get()) # Finally, grab the name of the arena vr_arena_name = self.controller.all_common_variables.vr_arena_name.split('/')[-1] # Grab the relevant undistort files self.controller.all_common_functions.grab_undistort_files() control_file.ControlTracking( boxsize=boxsize, signal=self.controller.all_common_variables.signal, cam=self.controller.all_common_variables.cam, base_path=self.controller.all_common_variables.path_entry_variable.get(), genotype= self.controller.all_common_variables.genotype_entry_variable.get(), recording_framerate=int(self.controller.all_common_variables.framerate_entry_variable.get()), resolution=self.controller.all_common_variables.resolution, recordingtime=int(self.controller.all_common_variables.recording_time_variable.get()), pixel_per_mm=self.controller.all_common_variables.pixel_per_mm_var.get(), model_organism=self.controller.all_common_variables.model_organism_variable.get(), vr_arena=vr_arena, pwm_object=self.controller.all_common_variables.pwm_object, placed_animal=self.controller.all_common_variables.placed_animal, vr_arena_name=vr_arena_name, high_power_led_bool=self.controller.all_common_variables.high_power_LEDs_bool.get(), organisms_and_heuristics=organisms_and_heuristics, debug_mode=self.controller.all_common_variables.debug_mode_var.get(), animal_detection_mode=self.controller.all_common_variables.animal_detection_method_var.get(), output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, output_channel_four=self.controller.all_common_variables.channel_four, overlay_bool=self.controller.all_common_variables.overlay_bool, controller=self.controller, background_channel = self.controller.all_common_variables.background, background_2_channel = self.controller.all_common_variables.background_two, background_dutycycle=self.controller.all_common_variables.backlight_intensity_value, background_2_dutycycle=self.controller.all_common_variables.backlight_two_intensity_value, vr_update_rate=self.controller.all_common_variables.vr_update_rate, pwm_range=self.controller.all_common_variables.pwm_range, adjust_intensity=float(self.controller.all_common_variables.vr_intensity_adjustment_variable.get()), vr_stim_location=self.controller.all_common_variables.vr_stim_loc_var.get(), version_info=VERSION_INFO, save_centroids_npy=self.controller.all_common_variables.save_centroids_npy.get(), save_heads_npy=self.controller.all_common_variables.save_heads_npy.get(), save_tails_npy=self.controller.all_common_variables.save_tails_npy.get(), save_midpoints_npy=self.controller.all_common_variables.save_midpoints_npy.get(), save_bbox_npy=self.controller.all_common_variables.save_bbox_npy.get(), save_stim_npy=self.controller.all_common_variables.save_stim_npy.get(), save_thresh_npy=self.controller.all_common_variables.save_thresh_npy.get(), save_skeleton_npy=self.controller.all_common_variables.save_skel_npy.get(), undistort_dst=self.controller.all_common_variables.undistort_dst_file, undistort_mtx=self.controller.all_common_variables.undistort_mtx_file, newcameramtx=self.controller.all_common_variables.newcameramtx, )
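# --- Illustrative sketch (not part of PiVR) -----------------------------------
# Based on the uint8 convention described in VirtualRealityFrame above, a
# time-varying ("dynamic") arena is a 3D numpy array with one 2D frame per
# timepoint. The exact axis order expected by PiVR is not restated here, so the
# (height, width, timepoints) layout below is an assumption for illustration
# only; the file name is hypothetical.
def _example_make_dynamic_arena(path='example_dynamic_arena.npy',
                                height=480, width=640, timepoints=100):
    import numpy as np
    arena = np.zeros((height, width, timepoints), dtype=np.uint8)
    for t in range(timepoints):
        # hypothetical content: a horizontal gradient that brightens over time
        peak = (255 * t) // max(timepoints - 1, 1)
        arena[:, :, t] = np.linspace(0, peak, width, dtype=np.uint8)
    np.save(path, arena)
    return arena
# -------------------------------------------------------------------------------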
class FullFrameImagesRecording(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) self.controller = controller self.camera_class = camera_class self.cam = self.camera_class.cam # Here the frame that will hold all the camera controls is being prepared. #self.subframe_preexperiment = tk.LabelFrame(self, text='Full Frame Image Recording') #self.subframe_preexperiment.grid(row=1, column=0) # construct the window by calling the SubFrames class with whatever we need. This instance is then bound to the # variable self.sub_frames to make it's variables etc available self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=True, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=True, start_text='Start Recording Images', experiment_name='Image Sequence', VR_arena=False, frame=self, quit_frame=False, time_dependent_stim_Frame=True ) # After the construction of the frame it needs to watch out for user input. self.controller.all_common_functions.menu_callback_shared(cam=True, backlight=True, analog_output_one=True, ) # todo - continue here by adding the ifs of the callback function # This function defines what should happen when the user presses on the 'Start Tracking' Button if RASPBERRY: def start_experiment_function(self): self.cam.framerate = int(self.controller.all_common_variables.framerate_entry_variable.get()) print('starting experiment from tracking frame') record_videos_and_image_sequences.RecordFullFrameImages( cam=self.controller.all_common_variables.cam, base_path=self.controller.all_common_variables.path_entry_variable.get(), genotype= self.controller.all_common_variables.genotype_entry_variable.get(), recording_framerate=int(self.controller.all_common_variables.framerate_entry_variable.get()), resolution=self.controller.all_common_variables.resolution, recording_time=int(self.controller.all_common_variables.recording_time_variable.get()), pixel_per_mm=self.controller.all_common_variables.pixel_per_mm_var.get(), model_organism=self.controller.all_common_variables.model_organism_variable.get(), time_dependent_stim_file=self.controller.all_common_variables.time_dependent_stimulation_file, #stim_GPIO=STIM_GPIO_1, pwm_object=self.controller.all_common_variables.pwm_object, output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, output_channel_four=self.controller.all_common_variables.channel_four, image_format = self.controller.all_common_variables.image_sequence_format_var.get(), high_power_LED_bool=self.controller.all_common_variables.high_power_LEDs_bool.get(), pwm_range = self.controller.all_common_variables.pwm_range, background_channel=self.controller.all_common_variables.background, background_2_channel=self.controller.all_common_variables.background_two, background_dutycycle=self.controller.all_common_variables.backlight_intensity_value, background_2_dutycycle=self.controller.all_common_variables.backlight_two_intensity_value, version_info=VERSION_INFO ) print('calling GPIO fully off now') self.controller.all_common_functions.turn_GPIO_fully_off( output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, 
output_channel_four=self.controller.all_common_variables.channel_four) else: def start_experiment_function(self): print('you can not record images while not on the Raspberry Pi') class TimelapseRecording(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) self.controller = controller self.camera_class = camera_class self.cam = self.camera_class.cam # Here the frame that will hold all the camera controls is being prepared. #self.subframe_preexperiment = tk.LabelFrame(self, text='Full Frame Image Recording') #self.subframe_preexperiment.grid(row=1, column=0) # construct the window by calling the SubFrames class with whatever we need. This instance is then bound to the # variable self.sub_frames to make it's variables etc available self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=True, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=True, start_text='Start Timelapse', experiment_name='Timelapse', VR_arena=False, frame=self, quit_frame=False, time_dependent_stim_Frame=False, timelapse_frame=True ) # After the construction of the frame it needs to watch out for user input. self.controller.all_common_functions.menu_callback_shared(cam=True, backlight=True, analog_output_one=True, ) # todo - continue here by adding the ifs of the callback function # This function defines what should happen when the user presses on the 'Start Tracking' Button if RASPBERRY: def start_experiment_function(self): # Prepare the timelapse: # First, combine days, hours and minutes for recording time: try: recording_days = int(self.controller.all_common_variables.recording_days_variable.get()) recording_hours = int(self.controller.all_common_variables.recording_hours_variable.get()) recording_minutes = int(self.controller.all_common_variables.recording_minutes_variable.get()) total_recording_time = (60*recording_minutes) + \ (60*60*recording_hours) + \ (24*60*60*recording_days) time_between_days = int(self.controller.all_common_variables.between_time_days.get()) time_between_hours = int(self.controller.all_common_variables.between_time_hours.get()) time_between_minutes = int(self.controller.all_common_variables.between_time_minutes.get()) between_time_seconds = int(self.controller.all_common_variables.between_time_seconds.get()) total_time_between = between_time_seconds+ \ (60*time_between_minutes) + \ (60*60*time_between_hours) + \ (24*60*60*time_between_days) experiment_go = True except ValueError: experiment_go = False tk.messagebox.showerror( 'Wrong Input', 'Please enter numbers in all fields:\n' 'Days, Hours, Minutes, and for the lower row, ' 'Seconds') if total_recording_time > total_time_between: experiment_go = True else: experiment_go = False tk.messagebox.showerror( 'Wrong Input', 'Please make sure that the total time\n' 'is larger than the time between images.') if experiment_go: self.cam.framerate = int(self.controller.all_common_variables.framerate_entry_variable.get()) print('starting experiment from tracking frame') record_videos_and_image_sequences.Timelapse( cam=self.controller.all_common_variables.cam, base_path=self.controller.all_common_variables.path_entry_variable.get(), genotype= self.controller.all_common_variables.genotype_entry_variable.get(), resolution=self.controller.all_common_variables.resolution, recordingtime=int(total_recording_time), time_between=int(total_time_between), pixel_per_mm=self.controller.all_common_variables.pixel_per_mm_var.get(), 
model_organism=self.controller.all_common_variables.model_organism_variable.get(), image_format = self.controller.all_common_variables.image_sequence_format_var.get(), pwm_range = self.controller.all_common_variables.pwm_range, background_channel=self.controller.all_common_variables.background, background_2_channel=self.controller.all_common_variables.background_two, background_dutycycle=self.controller.all_common_variables.backlight_intensity_value, background_2_dutycycle=self.controller.all_common_variables.backlight_two_intensity_value, version_info=VERSION_INFO ) else: def start_experiment_function(self): print('you can not record images while not on the Raspberry Pi') class VideoFrame(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) self.controller = controller self.camera_class = camera_class self.cam = self.camera_class.cam # Here the frame that will hold all the camera controls is being prepared. #self.subframe_preexperiment = tk.LabelFrame(self, text='Video') #self.subframe_preexperiment.grid(row=1, column=0) # construct the window by calling the SubFrames class with whatever we need. This instance is then bound to the # variable self.sub_frames to make it's variables etc available self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=True, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=True, start_text='Start Video Recording', experiment_name='Video', VR_arena=False, frame=self, quit_frame=False, time_dependent_stim_Frame=True ) # After the construction of the frame it needs to watch out for user input. self.controller.all_common_functions.menu_callback_shared(cam=True, backlight=True, analog_output_one=True, ) # todo - continue here by adding the ifs of the callback function if RASPBERRY or virtual_raspberry: def start_experiment_function(self): record_videos_and_image_sequences.RecordVideo(genotype= self.controller.all_common_variables.genotype_entry_variable.get(), recording_framerate=int(self.controller.all_common_variables.framerate_entry_variable.get()), resolution=self.controller.all_common_variables.resolution, recordingtime=int(self.controller.all_common_variables.recording_time_variable.get()), signal=self.controller.all_common_variables.signal, cam=self.cam, base_path=self.controller.all_common_variables.path_entry_variable.get(), time_dependent_stim_file=self.controller.all_common_variables.time_dependent_stimulation_file, #stim_GPIO=[STIM_GPIO_1, STIM_GPIO_2, STIM_GPIO_3], pwm_object=self.controller.all_common_variables.pwm_object, model_organism=self.controller.all_common_variables.model_organism_value, pixel_per_mm=self.controller.all_common_variables.pixel_per_mm_var.get(), output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, output_channel_four=self.controller.all_common_variables.channel_four, high_power_LED_bool=self.controller.all_common_variables.high_power_LEDs_bool.get(), pwm_range = self.controller.all_common_variables.pwm_range, version_info = VERSION_INFO ) # turn of GPIOs of Channel one - keep in loop instead of just saying GPIO17 off for forward compatibility!! 
# list comprehension of Channel 1 self.controller.all_common_functions.turn_GPIO_fully_off( output_channel_one=self.controller.all_common_variables.channel_one, output_channel_two=self.controller.all_common_variables.channel_two, output_channel_three=self.controller.all_common_variables.channel_three, output_channel_four=self.controller.all_common_variables.channel_four) else: def start_experiment_function(self): print('you can not record a video if not on the RaspberryPi') class TrackedAnalysisFrame(tk.Frame): def __init__(self, parent, controller, camera_class=None): """ This constructor class used to create the “Analysis” frame in the "Tools" Menu. This frame allows the user to select either a single or a number of folders to analyze in several different ways. """ tk.Frame.__init__(self, parent) self.controller = controller # the variable that gets changed when the option menu for # tools is selected self.analysis_type_var = tk.StringVar() self.analysis_available_modes = ('Distance to source', 'Distance to source, VR', 'Single animal tracking') self.analysis_type_var.set(self.analysis_available_modes[0]) # the variable that is assinged to the Checkbutton below self.multiplefolders_var = tk.IntVar() self.multiplefolders = True # todo import previous variables bla self.just_changed = True self.files_and_folders_common = None # size of window relative to display screen self.size_of_window = tk.StringVar() # available options for window size self.available_size_of_window = ('Small', 'Medium', 'Large') self.size_of_window.set(self.available_size_of_window[0]) self.subframe_title = tk.Label( self, text='Analysis', font='Helvetica 12 bold') self.subframe_title.grid(row=0, column=0, columnspan=2) # Buttons etc. to select the desired folder(s) self.subframe_folder = tk.LabelFrame(self, text='Data') self.subframe_folder.grid(row=1, column=0, columnspan=2, sticky='nsew') self.pathlabel = tk.Label(self.subframe_folder, text='Data to be analyzed') self.pathlabel.grid(row=0, column=0) self.pathbutton = tk.Button(self.subframe_folder, text='Press to select data to analyze', command=self.select_images_tracked_analysis) self.pathbutton.grid(row=1, column=0) self.pathchosen = tk.Label(self.subframe_folder, text='no path chosen') self.pathchosen.grid(row=2, column=0) self.multiplecheck = tk.Checkbutton(self.subframe_folder, text='More than one folder', variable=self.multiplefolders_var, command=self.multiplefolder_callback, onvalue=True, offvalue=False) self.multiplefolders_var.set(self.multiplefolders) self.multiplecheck.grid(row=0, column=1) self.common_str_label = tk.Label(self.subframe_folder, text='What character string ' '\ndo all folders you want' '\nto analyze, share? 
') self.common_str_label.grid(row=1, column=1) self.common_str = tk.StringVar() self.common_str_entry = tk.Entry(self.subframe_folder, width=10, textvariable=self.common_str) self.common_str_entry.grid(row=2, column=1) self.common_str.set('*') self.common_str_counted = tk.Label(self.subframe_folder, text='') self.common_str_counted.grid(row=3, column=1) # Other options self.subframe_option = tk.LabelFrame(self, text='') self.subframe_option.grid(row=2, column=0, sticky='nsew') self.analysis_type_label = tk.Label(self.subframe_option, text='What Analysis ' 'do\nyou want to do?') self.analysis_type_label.grid(row=0, column=0) self.analysis_type = tk.OptionMenu(self.subframe_option, self.analysis_type_var, *self.analysis_available_modes) self.analysis_type.grid(row=1, column=0) self.subframe_figure_size = tk.LabelFrame(self, text='') self.subframe_figure_size.grid(row=2, column=1, sticky='nsew') self.size_selection_label = tk.Label(self.subframe_figure_size, text='Select size\nof ' 'Figure') self.size_selection_label.grid(row=0, column=1) # make a button for user to change how large the display window should be self.size_button = tk.OptionMenu(self.subframe_figure_size, self.size_of_window, *self.available_size_of_window) self.size_button.grid(row=1, column=1) self.start_analysis_button = tk.Button(self, text='Press to start ' 'analysis', command=self.start_analysis) self.start_analysis_button.grid(row=3, column=0, columnspan=2) # callback every 700 ms self.menu_callback_shared() def start_analysis(self): """ This function is called when the user presses the "Press to start analysis" button. It checks which of the analysis options has been chosen: #. Distance to source: this will pass the image files to :py:class:`analysis_scripts.AnalysisDistanceToSource` #. Distance to source, VR: this will pass the image files to :py:class:`analysis_scripts.AnalysisVRDistanceToSource` #. Single animal tracking: this will pass the image file to :py:class:`control_file.ControlTracking` """ if self.analysis_type_var.get() == 'Distance to source': print(self.analysis_type_var.get()) analysis_scripts.AnalysisDistanceToSource( path=self.controller.all_common_variables.data_path, multiple_files=self.multiplefolders_var.get(), string=self.common_str.get(), size_of_window=self.size_of_window, controller=self.controller) elif self.analysis_type_var.get() == 'Distance to source, VR': print(self.analysis_type_var.get()) analysis_scripts.AnalysisVRDistanceToSource( path=self.controller.all_common_variables.data_path, multiple_files=self.multiplefolders_var.get(), string=self.common_str.get(), controller=self.controller) elif self.analysis_type_var.get() == 'Single animal tracking': # This variable is necessary as the code will go briefly # back to the main GUI while being in the wrong folder. # It will then crash. The fix is to save the state of the # variable self.files_and_folders_common in a new # variable that is not affected by GUI common_files_and_folders = self.files_and_folders_common def collect_info_and_start_tracking(): images, fps = initialize_image_data.get_self_images() save_experiment_settings = False # Will be changed # to True in case the experiment_settings file does # not exists yet! 
            try:
                with open((os.getcwd() + '/experiment_settings.json'), 'r') as file:
                    self.controller.all_common_variables.experimental_metadata = json.load(
                        file)
                try:
                    self.controller.all_common_variables.framerate_read_from_experiment_settings = \
                        self.controller.all_common_variables.experimental_metadata[
                            'Framerate']
                    framerate = \
                        self.controller.all_common_variables.framerate_read_from_experiment_settings
                except KeyError:
                    self.controller.all_common_variables.framerate_read_from_experiment_settings = None
                try:
                    pixel_per_mm = \
                        self.controller.all_common_variables.experimental_metadata[
                            'Pixel per mm']
                except KeyError:
                    # a missing dictionary key raises KeyError, not ValueError
                    pixel_per_mm = None
                try:
                    self.controller.all_common_variables.organism_read_from_experiment_settings = \
                        self.controller.all_common_variables.experimental_metadata[
                            'Model Organism']
                except KeyError:
                    self.controller.all_common_variables.organism_read_from_experiment_settings = None
            except (ValueError, AttributeError, FileNotFoundError):
                save_experiment_settings = True

                # if pixel_per_mm never got defined, ask the user to do it here!
                tk.messagebox.showinfo(
                    'Define Pixel per mm',
                    'To enable tracking, please select a file'
                    '\n(image, video etc.) and define a known'
                    '\ndistance in that image.'
                    '\n\n'
                    'Please consult the manual if unclear what to do!')
                distance_factor = distance_configuration_module \
                    .DistanceConfigurationStatic(known_distance=None)
                pixel_per_mm = round(distance_factor.distance_factor, 3)
                print(pixel_per_mm)

                if self.controller.all_common_variables.framerate_read_from_experiment_settings is None:
                    if fps is not None:
                        framerate = int(fps)
                    else:
                        # child = tk.Toplevel()
                        # child.grab_set()
                        framerate = simpledialog.askinteger(
                            "Framerate",
                            "What is the framerate"
                            "\nof the image sequence/video?",
                            parent=self,
                            minvalue=0, maxvalue=100000)
                        # child.destroy()
                else:
                    framerate = self.controller.all_common_variables.framerate_read_from_experiment_settings

                # As this means that no experiment_settings.json file
                # exists yet, it can be created now and be used in
                # the future
                save_experiment_settings = True
                experiment_settings = {}
                experiment_settings["Pixel per mm"] = pixel_per_mm
                experiment_settings["Framerate"] = framerate
                experiment_settings["Resolution"] = repr(
                    images.shape[1]) + 'x' + repr(images.shape[0])
                experiment_settings['Recording time'] = \
                    images.shape[2] / framerate

            playback_speed = 100000
            # todo - no playback possible anyway! This is an orphan
            # variable, only used to pass something to the
            # ControlTracking class - probably can get rid of it.
            # Not sure this is true as it's possible to run this
            # with Debug Mode on.

            # define the boxsize
            try:
                # boxsize is the area, in all four directions starting
                # from the bounding box of the animal, that will be used
                # in the next frame to look for the animal. It depends
                # on the speed of the animal and the framerate.
                # boxsize is calculated for each experiment:
                # max_speed_of_animal_in_mm_per_sec / framerate
                # (to get speed in mm/frame), then multiplied by
                # pixel per mm to get speed in pixel/frame, the actual
                # parameter that is needed for the program to function.
                # This is then multiplied by two to give more wiggle
                # room as some animals move very fast sometimes.
                boxsize = (organisms_and_heuristics[
                    self.controller.all_common_variables.organism_read_from_experiment_settings][
                    'max_speed_animal_mm_per_s'] / int(framerate) * pixel_per_mm)
                # Since the boxsize now also depends on the max speed of
                # the animal and the framerate, it can become extremely
                # small (i.e. a larva that moves 2mm/s at 30 frames/s
                # will only move 0.066mm, which at 5px/mm is only
                # 0.33 pixel). If in any frame the animal is not
                # completely detected, the algorithm could never recover.
                # This is why the length of the animal must be taken
                # into account as well!
                boxsize = boxsize + (organisms_and_heuristics[
                    self.controller.all_common_variables.organism_read_from_experiment_settings][
                    'max_skeleton_length_mm'] * pixel_per_mm)
            except KeyError:
                # This happens, for example, when
                # experiment_settings.json does not exist
                tk.messagebox.showinfo(
                    "Not in List",
                    "Can not find your organism in the "
                    "'list_of_available_organisms.json'\n"
                    "You are either missing the "
                    "experiment_settings.json file in your "
                    "experimental folder or\n"
                    "use an undefined animal\n")
                max_speed_animal = simpledialog.askinteger(
                    "Estimate movement speed",
                    "Estimate the maximal movement speed"
                    "\nof the animal you are tracking here."
                    "\n\n"
                    "For your reference:"
                    "\na fruit fly larva crawls at up to 2mm/s and "
                    "\nan adult fruit fly walks at up to 150mm/s",
                    parent=self,
                    minvalue=0, maxvalue=100000)
                boxsize = (max_speed_animal / int(framerate) * pixel_per_mm)
                skel_leng = simpledialog.askinteger(
                    "Estimate animal length",
                    "Estimate the maximal animal length of"
                    "\nthe animal you are tracking here."
                    "\n\n"
                    "For your reference, a fruit fly larva is about"
                    "\n5mm long",
                    parent=self,
                    minvalue=0, maxvalue=100000)
                boxsize = boxsize + (skel_leng * pixel_per_mm)

            print('boxsize' + repr(boxsize))

            # get the model organism
            model_organism = self.controller.all_common_variables.organism_read_from_experiment_settings
            if model_organism is None:
                model_organism = 'Not in list'
            try:
                experiment_settings["Model Organism"] = model_organism
            except NameError:
                # not defined:
                pass

            # get the genotype
            genotype = self.controller.all_common_variables.genotype_read_from_experiment_settings

            # As the PiVR version with which the video was recorded might
            # differ from the version that is used for analysis,
            # be explicit here:
            self.controller.all_common_variables.experimental_metadata['PiVR info (tracking)'] = VERSION_INFO

            # Save info on whether undistort was done online
            if self.controller.all_common_variables.online_undistort_bool.get():
                online_undistort = 'True'
            else:
                online_undistort = 'False'
            self.controller.all_common_variables.experimental_metadata['Online undistort performed'] = online_undistort

            # if save_experiment_settings:
            with open('experiment_settings.json', 'w') as file:
                json.dump(self.controller.all_common_variables.experimental_metadata,
                          file, sort_keys=True, indent=4)

            # Grab the relevant undistort files
            self.controller.all_common_functions.grab_undistort_files(
                repr(images.shape[1]) + 'x' + repr(images.shape[0]))

            control_file.ControlTracking(
                boxsize=boxsize,
                signal=self.controller.all_common_variables.signal,
                cam=images,
                genotype=genotype,
                recording_framerate=framerate,
                recordingtime=None,
                pixel_per_mm=pixel_per_mm,
                model_organism=model_organism,
                display_framerate=playback_speed,
                offline_analysis=True,
                organisms_and_heuristics=organisms_and_heuristics,
                post_hoc_tracking=True,
                debug_mode=self.controller.all_common_variables.debug_mode_var.get(),
                save_centroids_npy=self.controller.all_common_variables.save_centroids_npy.get(),
                save_heads_npy=self.controller.all_common_variables.save_heads_npy.get(),
                save_tails_npy=self.controller.all_common_variables.save_tails_npy.get(),
                save_midpoints_npy=self.controller.all_common_variables.save_midpoints_npy.get(),
                save_bbox_npy=self.controller.all_common_variables.save_bbox_npy.get(),
save_thresh_npy=self.controller.all_common_variables.save_thresh_npy.get(), save_skeleton_npy=self.controller.all_common_variables.save_skel_npy.get(), undistort_dst=self.controller.all_common_variables.undistort_dst_file, undistort_mtx=self.controller.all_common_variables.undistort_mtx_file, newcameramtx=self.controller.all_common_variables.newcameramtx, ) if self.multiplefolders_var.get(): print('Will analyze ' + repr(len(self.files_and_folders_common)) + ' folders') for i_folder in range(len(common_files_and_folders)): print('Analyzing Folder: ' + self.controller.all_common_variables.data_path + '//' + common_files_and_folders[i_folder]) os.chdir( self.controller.all_common_variables.data_path + '//' + common_files_and_folders[i_folder]) collect_info_and_start_tracking() else: os.chdir(self.controller.all_common_variables.data_path) collect_info_and_start_tracking() def multiplefolder_callback(self): if self.multiplefolders_var.get(): print('multiplefolders') self.controller.access_subframes( 'TrackedAnalysisFrame').common_str_label.config( state='normal') self.controller.access_subframes( 'TrackedAnalysisFrame').common_str_entry.config( state='normal') else: print('one folder') self.controller.access_subframes( 'TrackedAnalysisFrame').common_str_label.config( state='disabled') self.controller.access_subframes( 'TrackedAnalysisFrame').common_str_entry.config( state='disabled') self.controller.access_subframes( 'TrackedAnalysisFrame').common_str_counted.configure(text='') self.just_changed = True def menu_callback_shared(self): # call this after every 700ms self.after(700, self.menu_callback_shared) if self.multiplefolders_var.get() and self.just_changed: # self.common_str_label.grid() # self.common_str.grid() pass elif self.just_changed: # self.common_str_label.grid_remove() # self.common_str_label.grid_remove() pass self.just_changed = False entered_text = self.common_str.get() self.files_and_folders_common = [p.replace('\\', '') for p in glob('*' + entered_text + '*/')] try: if self.controller.access_subframes( 'TrackedAnalysisFrame').multiplefolders_var.get(): self.common_str_counted.configure( text='Number of files: ' + repr(len(self.files_and_folders_common))) except KeyError: # Happens during instantiation pass # label = tk.Label(self, text="This is page is for Analysis - \nstill needs to be implemente", font=controller.title_font) # label.pack(side="top", fill="x", pady=10) # button = tk.Button(self, text="Go to the start page", # command=lambda: controller.show_frame("TrackingFrame")) # button.pack() def select_images_tracked_analysis(self): ''' self.image_path = filedialog.askdirectory() os.chdir(self.image_path) self.cam = self.image_path + '\\all_images.npy' path_to_show = '...' + self.image_path[-20:] self.pathchosen.configure(text=path_to_show) ''' self.controller.all_common_variables.data_path = filedialog.askdirectory() os.chdir(self.controller.all_common_variables.data_path) self.cam = self.controller.all_common_variables.data_path + DIRECTORY_INDICATOR + 'all_images.npy' path_to_show = '...' + self.controller.all_common_variables.data_path[-20:] self.pathchosen.configure(text=path_to_show) class ImageDataHandling(tk.Frame): def __init__(self, parent, controller, camera_class=None): """ This constructor class is used to create the "Image Data Handling" frame in the "Tools" Menu. 
This frame allows the user to select either a single of a number of folders to analyze in several different ways """ tk.Frame.__init__(self, parent) self.controller = controller # the variable that gets changed when the option menu for # tools is selected self.conversion_type_var = tk.StringVar() # the variable that is assinged to the Checkbutton below self.multiplefolders_var = tk.IntVar() self.multiplefolders = True # todo import previous variables bla self.files_and_folders_common = None self.conversion_available_modes = ('Image Seq. to matrix file', 'Video conversion', 'Undistort Video') self.conversion_type_var.set(self.conversion_available_modes[0]) self.video_type_var = tk.StringVar() self.video_formats_available = ('avi', 'mp4', 'None') self.video_type_var.set(self.video_formats_available[0]) self.video_codec_var = tk.StringVar() self.video_codec_available = ('h264', #'mpeg2video', Terrible quality 'rawvideo') self.video_codec_var.set(self.video_codec_available[0]) self.video_color_var = tk.StringVar() self.video_color_available = ('greyscale', 'color') self.video_color_var.set(self.video_color_available[0]) self.image_color_var = tk.StringVar() self.image_color_available = ('greyscale', 'color') self.image_color_var.set(self.image_color_available[0]) self.image_output_zip_var = tk.IntVar() self.image_output_zip_var.set(1) self.delete_images_var = tk.IntVar() self.delete_images_var.set(1) self.image_output_npy_var = tk.IntVar() self.image_output_npy_var.set(1) self.image_output_mat_var = tk.IntVar() self.image_output_mat_var.set(0) self.video_output_npy_var = tk.IntVar() self.video_output_npy_var.set(0) self.video_output_mat_var = tk.IntVar() self.video_output_mat_var.set(0) self.video_output_emboss_stim = tk.IntVar() self.video_output_emboss_stim.set(0) self.subframe_title = tk.Label( self, text='Image Data Handling', font='Helvetica 12 bold') self.subframe_title.grid(row=0, column=0, columnspan=3) # Buttons etc. to select the desired folder(s) self.subframe_folder = tk.LabelFrame(self, text='Data') self.subframe_folder.grid(row=1, column=0, columnspan=3, sticky='nsew') self.pathlabel = tk.Label(self.subframe_folder, text='Image data to modify') self.pathlabel.grid(row=0, column=0) self.pathbutton = tk.Button(self.subframe_folder, text='Press to select data to ' 'modify', command=self.select_folder) self.pathbutton.grid(row=1, column=0) self.pathchosen = tk.Label(self.subframe_folder, text='no path chosen') self.pathchosen.grid(row=2, column=0) self.multiplecheck = tk.Checkbutton(self.subframe_folder, text='More than one folder', variable=self.multiplefolders_var, command=self.multiplefolder_callback, onvalue=True, offvalue=False) self.multiplefolders_var.set(self.multiplefolders) self.multiplecheck.grid(row=0, column=1) self.common_str_label = tk.Label(self.subframe_folder, text='What character string ' '\ndo all folders you want' '\nto modify, share? 
') self.common_str_label.grid(row=1, column=1) self.common_str = tk.StringVar() self.common_str_entry = tk.Entry(self.subframe_folder, width=10, textvariable=self.common_str) self.common_str_entry.grid(row=2, column=1) self.common_str.set('*') self.common_str_counted = tk.Label(self.subframe_folder, text='') self.common_str_counted.grid(row=3, column=1) # Other options self.subframe_option = tk.LabelFrame(self, text='') self.subframe_option.grid(row=2, column=0, sticky='nsew') self.conversion_type_label = tk.Label(self.subframe_option, text='What modifications ' 'do\nyou want to do?') self.conversion_type_label.grid(row=0, column=0) self.conversion_type = tk.OptionMenu(self.subframe_option, self.conversion_type_var, *self.conversion_available_modes) self.conversion_type.grid(row=1, column=0) # Let user select desired video output self.subframe_video_options = tk.LabelFrame( self, text='Video options') self.subframe_video_options.grid(row=2, column=1, sticky='nsew') self.select_output_video_label = tk.Label( self.subframe_video_options, text='Format') self.select_output_video_label.grid(row=0,column=0) self.select_output_video_format = tk.OptionMenu( self.subframe_video_options, self.video_type_var, *self.video_formats_available) self.select_output_video_format.grid(row=0, column=1) self.select_codec_video_label = tk.Label( self.subframe_video_options, text='Codec') self.select_codec_video_label.grid(row=1, column=0) self.select_output_video_codec = tk.OptionMenu( self.subframe_video_options, self.video_codec_var, *self.video_codec_available) self.select_output_video_codec.grid(row=1, column=1) self.select_color_menu = tk.OptionMenu( self.subframe_video_options, self.video_color_var, *self.video_color_available, ) self.select_color_menu.grid(row=2, column=0, columnspan=2) self.npy_button_video = tk.Checkbutton( self.subframe_video_options, text='Save *.npy', variable=self.video_output_npy_var, onvalue=True, offvalue=False) self.npy_button_video.grid(row=3,column=0, columnspan=2) self.mat_button_video = tk.Checkbutton( self.subframe_video_options, text='Save *.mat', variable=self.video_output_mat_var, onvalue=True, offvalue=False) self.mat_button_video.grid(row=4,column=0, columnspan=2) self.emboss_stimulus_button_video = tk.Checkbutton( self.subframe_video_options, text='Emboss stimulus', variable=self.video_output_emboss_stim, onvalue=True, offvalue=False) self.emboss_stimulus_button_video.grid(row=5,column=0, columnspan=2) # Let user select output of image packing self.subframe_image_options = tk.LabelFrame( self, text='Image options') self.subframe_image_options.grid(row=2, column=2, sticky='nsew') self.zip_button = tk.Checkbutton( self.subframe_image_options, text='Zip images', variable=self.image_output_zip_var, onvalue=True, offvalue=False) self.zip_button.grid(row=0, column=0, sticky="W") self.delete_images = tk.Checkbutton( self.subframe_image_options, text='Delete original', variable=self.delete_images_var, onvalue=True, offvalue=False) self.delete_images.grid(row=1, column=0, sticky="W") self.select_color_images_menu = tk.OptionMenu( self.subframe_image_options, self.image_color_var, *self.image_color_available, ) self.select_color_images_menu.grid(row=2, column=0, sticky="W") self.npy_button = tk.Checkbutton( self.subframe_image_options, text='Save *.npy', variable=self.image_output_npy_var, onvalue=True, offvalue=False) self.npy_button.grid(row=3, column=0, sticky="W") self.mat_button = tk.Checkbutton( self.subframe_image_options, text='Save *.mat', 
variable=self.image_output_mat_var, onvalue=True, offvalue=False) self.mat_button.grid(row=4, column=0, sticky="W") self.start_conversion_button = tk.Button(self, text='Press to start ' 'conversion', command=self.start_conversion) self.start_conversion_button.grid(row=3, column=0, columnspan=3) # callback every 700 ms self.menu_callback_shared() def start_conversion(self): """ bar :return: """ if self.conversion_type_var.get() == 'Image Seq. to matrix file': image_data_handling.PackingImages( controller=self.controller, path = self.controller.all_common_variables.data_path, multiplefolders = self.multiplefolders_var.get(), folders = self.files_and_folders_common, zip=self.image_output_zip_var.get(), delete=self.delete_images_var.get(), npy=self.image_output_npy_var.get(), mat=self.image_output_mat_var.get(), color_mode = self.image_color_var.get() ) elif self.conversion_type_var.get() == 'Video conversion': image_data_handling.ConvertH264( path=self.controller.all_common_variables.data_path, multiplefolders=self.multiplefolders_var.get(), folders=self.files_and_folders_common, save_npy=self.video_output_npy_var.get(), save_mat=self.video_output_mat_var.get(), emboss_stimulus= self.video_output_emboss_stim.get(), color_mode = self.video_color_var.get(), output_video_format = self.video_type_var.get(), codec = self.video_codec_var.get() ) elif self.conversion_type_var.get() == 'Undistort Video': image_data_handling.UndistortH264( path=self.controller.all_common_variables.data_path, multiplefolders=self.multiplefolders_var.get(), folders=self.files_and_folders_common, save_npy=self.video_output_npy_var.get(), save_mat=self.video_output_mat_var.get(), #color_mode = self.video_color_var.get(), output_video_format = self.video_type_var.get(), codec=self.video_codec_var.get(), undistort_path=self.controller.all_common_variables.undistort_path ) def multiplefolder_callback(self): if self.multiplefolders_var.get(): print('multiplefolders') self.controller.access_subframes( 'ImageDataHandling').common_str_label.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').common_str_entry.config( state='normal') else: print('one folder') self.controller.access_subframes( 'ImageDataHandling').common_str_label.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').common_str_entry.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').common_str_counted.configure(text='') def menu_callback_shared(self): # call this after every 700ms self.after(700, self.menu_callback_shared) entered_text = self.common_str.get() self.files_and_folders_common = [p.replace('\\', '') for p in glob( '*' + entered_text + '*/')] try: if self.controller.access_subframes( 'ImageDataHandling').multiplefolders_var.get(): self.common_str_counted.configure( text='Number of files: ' + repr(len(self.files_and_folders_common))) except KeyError: # Happens during instantiation pass try: if self.conversion_type_var.get() == 'Image Seq. 
to matrix file': self.controller.access_subframes( 'ImageDataHandling').select_output_video_format.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').select_output_video_codec.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').select_color_menu.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').npy_button_video.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').mat_button_video.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').emboss_stimulus_button_video.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').zip_button.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').delete_images.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').npy_button.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').mat_button.config( state='normal') elif self.conversion_type_var.get() == 'Video conversion'\ or 'Undistort Video': self.controller.access_subframes( 'ImageDataHandling').select_output_video_format.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').select_output_video_codec.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').npy_button_video.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').mat_button_video.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').emboss_stimulus_button_video.config( state='normal') self.controller.access_subframes( 'ImageDataHandling').zip_button.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').delete_images.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').npy_button.config( state='disabled') self.controller.access_subframes( 'ImageDataHandling').mat_button.config( state='disabled') # the color is a bit special - only works for video # conversion, not for undistort! So force greyscale # in Undistort! if self.conversion_type_var.get() == 'Video conversion': self.controller.access_subframes( 'ImageDataHandling').select_color_menu.config( state='normal') else: self.controller.access_subframes( 'ImageDataHandling').video_color_var.set( self.video_color_available[0]) self.controller.access_subframes( 'ImageDataHandling').select_color_menu.config( state='disabled') except KeyError: # Breaks during instantiations pass def select_folder(self): """ When user clicks on "Press to select data to modify" this function is called: It lets the user select a folder, changes the current working directory and changes the path shown in the GUI. """ self.controller.all_common_variables.data_path = filedialog.askdirectory() os.chdir(self.controller.all_common_variables.data_path) path_to_show = '...' 
+ self.controller.all_common_variables.data_path[-20:] self.pathchosen.configure(text=path_to_show) class DisplayTrackedImage(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) self.controller = controller # if previous_variables bla # get image path # self.data_path = None # todo don't think we need, get rid after testing self.framerate = 0 self.undistort_done = False self.colormap_options = ('Greys_r', 'viridis', 'plasma', 'magma', 'afmhot', 'cool') self.colormap_variable = tk.StringVar() self.colormap_variable.set(self.colormap_options[0]) self.subframe_title = tk.Label( self, text='Display Tracked Experiment', font='Helvetica 12 bold') self.subframe_title.grid(row=0, column=0, columnspan=4) self.pathbutton = tk.Button(self, text='Select data to analyze', command=self.select_images_display_tracked_image) self.pathbutton.grid(row=1, column=0, columnspan=2) self.pathchosen = tk.Label(self, text='no data chosen yet') self.pathchosen.grid(row=2, column=0, columnspan=2) self.experiment_date_n_time_frame = tk.LabelFrame(self, text='Experiment Date/Time') self.experiment_date_n_time_frame.grid(row=3, column=0, sticky='nsew') self.experiment_date_n_time_label = tk.Label(self.experiment_date_n_time_frame, text='') self.experiment_date_n_time_label.grid(row=0, column=0) self.framerate_label_frame = tk.LabelFrame(self, text='Framerate') self.framerate_label_frame.grid(row=4, column=0, sticky='nsew') self.framerate_label_label = tk.Label(self.framerate_label_frame, text='') self.framerate_label_label.grid(row=0, column=0) self.recording_time_frame = tk.LabelFrame(self, text='Recorded Time') self.recording_time_frame.grid(row=5, column=0, sticky='nsew') self.recording_time_label = tk.Label(self.recording_time_frame, text='') self.recording_time_label.grid(row=0, column=0) self.resolution_frame = tk.LabelFrame(self, text='Resolution') self.resolution_frame.grid(row=6, column=0, sticky='nsew') self.resolution_label = tk.Label(self.resolution_frame, text='') self.resolution_label.grid(row=0, column=0) self.species_frame = tk.LabelFrame(self, text='Species') self.species_frame.grid(row=3, column=1, sticky='nsew') self.species_label = tk.Label(self.species_frame, text='') self.species_label.grid(row=0, column=0) self.genotype_frame = tk.LabelFrame(self, text='Exp. group') self.genotype_frame.grid(row=4, column=1, sticky='nsew') self.genotype_label = tk.Label(self.genotype_frame, text='') self.genotype_label.grid(row=0, column=0) self.pixel_per_mm_frame = tk.LabelFrame(self, text='Pixel per mm') self.pixel_per_mm_frame.grid(row=6, column=1, sticky='nsew') self.pixel_per_mm_label = tk.Label(self.pixel_per_mm_frame, text='') self.pixel_per_mm_label.grid(row=0, column=0) self.colormap_label = tk.Label(self, text='Select Colormap') self.colormap_label.grid(row=5, column=3) self.colormap_menu = tk.OptionMenu(self, self.colormap_variable, *self.colormap_options) self.colormap_menu.grid(row=5, column=4) self.start_analysis_button = tk.Button(self, text='Press to show \nthe behavior of the animal', command=self.visualize_experiment_func) self.start_analysis_button.grid(row=6, column=3, columnspan=2) # Display the image of the tracked animal # Due to the garbage collection of Python we need to keep the reference of the self.image_object! 
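        # A minimal sketch of the gotcha mentioned above (illustrative only,
        # not part of this module): if a PhotoImage is created without
        # keeping a reference, Python may garbage-collect it and the label
        # goes blank. The file name 'some_picture.jpg' is hypothetical.
        #
        #     img = ImageTk.PhotoImage(Image.open('some_picture.jpg'))
        #     label = tk.Label(parent, image=img)
        #     label.image = img   # keep a reference so the image persists
        #
        # The attribute assignment below follows the same idea.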
        self.image_object = ImageTk.PhotoImage(
            Image.open(self.controller.path_of_program +
                       'pics/tracked_placeholder.jpg').resize((160, 120)))
        self.image_label = tk.Label(self, image=self.image_object)
        self.image_label.grid(row=3, column=2, rowspan=4)

        self.updated_image = None
        self.background_image = None
        self.experiment_csv = None

        self.common_str_counted = tk.Label(self, text='')
        self.common_str_counted.grid(row=4, column=1)

        self.undistort_data = None

        # callback every 700 ms
        self.menu_callback()

    def menu_callback(self):
        # call this after every 700ms
        self.after(700, self.menu_callback)

    def select_images_display_tracked_image(self):
        if self.controller.all_common_variables.data_path is not None:
            self.controller.all_common_variables.data_path = \
                filedialog.askdirectory(
                    initialdir=self.controller.all_common_variables.data_path)
        else:
            self.controller.all_common_variables.data_path = filedialog.askdirectory()
        os.chdir(self.controller.all_common_variables.data_path)
        self.cam = self.controller.all_common_variables.data_path + '/all_images.npy'
        path_to_show = '...' + self.controller.all_common_variables.data_path[-30:]
        self.pathchosen.configure(text=path_to_show)

        with open((self.controller.all_common_variables.data_path +
                   '/experiment_settings.json'), 'r') as file:
            experiment_settings = json.load(file)

        try:
            self.experiment_date_n_time_label.configure(
                text=experiment_settings['Experiment Date and Time'])
        except KeyError:
            # if post-hoc experiment, this Key is not created and
            # would lead to a KeyError without this try..except
            pass

        self.framerate_label_label.configure(
            text=repr(experiment_settings['Framerate']) + 'fps')
        self.framerate = experiment_settings['Framerate']
        self.recording_time_label.configure(
            text=repr(experiment_settings['Recording time']) + 's')
        self.resolution_label.configure(text=experiment_settings['Resolution'])
        self.species_label.configure(text=experiment_settings['Model Organism'])
        if 'Exp. group' in experiment_settings:
            self.genotype_label.configure(text=experiment_settings['Exp. group'])
        elif 'Genotype' in experiment_settings:
            self.genotype_label.configure(text=experiment_settings['Genotype'])
        self.pixel_per_mm_label.configure(text=experiment_settings['Pixel per mm'])

        if 'Signal' in experiment_settings:
            self.signal = experiment_settings['Signal']
        else:
            self.signal = self.controller.all_common_variables.signal

        if 'Online undistort performed' in experiment_settings:
            print(experiment_settings['Online undistort performed'])
            if experiment_settings['Online undistort performed'] == 'True':
                if CV2_INSTALLED:
                    # prep the camera matrix
                    #self.controller.all_common_functions.grab_undistort_files(experiment_settings['Resolution'])
                    # read the mtx and dist data from the npz file:
                    self.undistort_data = np.load(
                        Path(self.controller.all_common_variables.data_path,
                             'undistort_matrices.npz'))
                else:
                    tk.messagebox.showerror(
                        'opencv not installed',
                        'The experiment you have selected was recorded\n'
                        'with online undistortion.\n'
                        'To correctly display it you must install opencv\n'
                        'on this computer:\n'
                        'Open the terminal, activate pivr_environment and\n'
                        'type:\n'
                        'conda install -c conda-forge opencv\n'
                        'Then restart the software.')

        # todo - kick this out at one point - at the moment still have mixed folders!
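        # Sketch of how such an undistort_matrices.npz is typically applied
        # with OpenCV (illustrative, not taken from this module; the array
        # names 'mtx' and 'dist' and the variable 'frame' are assumptions):
        #
        #     data = np.load('undistort_matrices.npz')
        #     mtx, dist = data['mtx'], data['dist']
        #     h, w = frame.shape[:2]
        #     new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
        #     undistorted = cv2.undistort(frame, mtx, dist, None, new_mtx)
        #
        # The actual correction for display is handled downstream by
        # visualize_tracked_experiment, which receives self.undistort_data.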
try: self.updated_image = ImageTk.PhotoImage(Image.open(self.controller.all_common_variables.data_path + '/Overview of SmA-T tracking.png').resize((160, 120))) except FileNotFoundError: try: self.updated_image = ImageTk.PhotoImage(Image.open(self.controller.all_common_variables.data_path + '/Overview of SmAl-VR tracking.png').resize( (160, 120))) except FileNotFoundError: try: self.updated_image = ImageTk.PhotoImage(Image.open(self.controller.all_common_variables.data_path + '/Overview of smAL-VR tracking.png').resize( (160, 120))) except FileNotFoundError: self.updated_image = ImageTk.PhotoImage(Image.open(self.controller.all_common_variables.data_path + '/Overview of tracking.png').resize( (160, 120))) self.image_label.configure(image=self.updated_image) self.image_label.image = self.updated_image try: self.background_image = imageio.imread( self.controller.all_common_variables.data_path + '/Background.tiff') except FileNotFoundError: self.background_image = imageio.imread( self.controller.all_common_variables.data_path + '/Background.jpg') files = [p.replace('\\', '') for p in glob('*')] # Since there's the suboptimal naming convention when # collecting a video of 'data.csv' to denote info about the # stimulus and when then doing analysis it's also called # 'data.csv' the loop below first collects all data.csv names # and selects the newest one (as analysis must have been done # after the data collection). files_of_interest = [] for i in files: if 'data.csv' in i: files_of_interest.append(i) #data_name = i if len(files_of_interest) == 1: data_name = files_of_interest[0] else: files_of_interest.sort() data_name = files_of_interest[-1] self.experiment_csv = pd.read_csv(self.controller.all_common_variables.data_path + '/' + data_name, sep=',') def visualize_experiment_func(self): visualize_tracked_experiment.VisualizeTrackedExperiment(csv_data=self.experiment_csv, background_image=self.background_image, data_path=self.controller.all_common_variables.data_path, recording_framerate=self.framerate, colormap=self.colormap_variable.get(), signal=self.signal, undistort_data=self.undistort_data) class SimulateOnlineExperiment(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) # reference the camera and the controller self.camera_class = camera_class self.cam = self.camera_class.cam self.controller = controller # only start experiment if all conditions are fullfilled! self.can_experiment_start = True # self.data_path = None # todo Don't think we need, get rid after testing self.entered_text = None self.timestamps = None self.framerate = None # can't use a textvariable as we'll use both strings and numbers here! self.species = None self.genotype = None # Here the frame that will hold all the camera controls is being prepared. self.subframe_preexperiment = tk.LabelFrame(self, text='') self.subframe_preexperiment.grid(row=0, column=0) # construct the window by calling the SubFrames class with whatever we need. 
This instance is then bound to the # variable self.sub_frames to make it's variables etc available self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=False, subframe_preexperiment=self.subframe_preexperiment, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, exp_ctr_frame=False, VR_arena=False, frame=self, quit_frame=False, display_experiment_settings=True, convert_images_frame=True ) def start_experiment_function(self): if self.controller.all_common_variables.framerate_read_from_experiment_settings is None: tk.messagebox.showerror("Warning", "No Framerate specified!\n" "Please define framerate that was used to record\n" "this video/image sequence in the 'Fix Metadata'\n" "menu before simulating the tracking") self.can_experiment_start = False else: framerate = self.controller.all_common_variables.framerate_read_from_experiment_settings if self.controller.all_common_variables.pixel_per_mm_read_from_experiment_settings is None: tk.messagebox.showerror("Warning", "No Pixel per mm specified!\n" "Please define pixel/mm that was used to record\n" "this video/image sequence in the 'Fix Metadata'\n" "menu before simulating the tracking") self.can_experiment_start = False else: pixel_per_mm = self.controller.all_common_variables.pixel_per_mm_read_from_experiment_settings if self.can_experiment_start: images, fps = initialize_image_data.get_self_images() ''' # currently ony resolution of 640x480 is allowed. Might be a new feature in the future if images.shape[0] != 480 or images.shape[1] != 640: tk.messagebox.showinfo('Incompatible resolution', 'You have selected an experiment with resolution ' + repr(images.shape[1]) + 'x' + repr(images.shape[0])+'\n' 'You can ancan analyze this experiment but for online tracking\n' 'you must select a resolution of 640x480') ''' if self.controller.all_common_variables.playback_speed_var.get() == '0.1X': playback_speed = framerate * 0.1 elif self.controller.all_common_variables.playback_speed_var.get() == '0.5X': playback_speed = framerate * 0.5 elif self.controller.all_common_variables.playback_speed_var.get() == '1X': playback_speed = framerate * 1 elif self.controller.all_common_variables.playback_speed_var.get() == '2X': playback_speed = framerate * 2 elif self.controller.all_common_variables.playback_speed_var.get() == '5X': playback_speed = framerate * 5 elif self.controller.all_common_variables.playback_speed_var.get() == 'Fastest': playback_speed = 100000 # as long as no one is using a supercomputer we should be fine, # otherwise add a zero! print('playback speed will be: ' + repr(playback_speed) + 'fps') # boxsize is the area in all four directions starting from the bounding box of the animal that will # be used in the next frame to look for the animal. It depends on the speed of the animal and the # framerate # boxsize is calculated for each experiment: # max_speed_of_animal_in_mm_per_sec / framerate (to get speed in mm/frames) then multiplied by # pixel per mm to get speed in pixel/frame, the actualy parameter that is needed for the program to # function. This is then multiplied by two to give more wiggle room as some animals move very fast sometimes boxsize = (organisms_and_heuristics[ self.controller.all_common_variables.organism_read_from_experiment_settings][ 'max_speed_animal_mm_per_s'] / int(framerate) * pixel_per_mm) # since the boxsize now also depends on the max speed of the animal and the framerate it can become extremely # small (i.e. 
a larva that moves 2mm/s at 30frames will only move 0.066mm which at 5px/mm is only 0.33pixel # If in any frame the animal is not completely detected the algorithm could never recover. This is why the # length of the animal must be taken into account as well! boxsize = boxsize + (organisms_and_heuristics[ self.controller.all_common_variables.organism_read_from_experiment_settings][ 'max_skeleton_length_mm'] * pixel_per_mm) # Version number can differ between recording and the # tracking done here. Clearly indicate it with open((os.getcwd() + '/experiment_settings.json'),'r+') as file: experiment_settings = json.load(file) experiment_settings['PiVR info (tracking)'] = VERSION_INFO with open((os.getcwd() + '/experiment_settings.json'),'w') as file: json.dump(experiment_settings, file, sort_keys=True, indent=4) print('boxsize is ' + repr(boxsize)) # Grab the relevant undistort files self.controller.all_common_functions.grab_undistort_files( repr(images.shape[1]) + 'x' + repr(images.shape[0])) control_file.ControlTracking(boxsize=boxsize, signal=self.controller.all_common_variables.signal, cam=images, base_path=None, genotype=self.controller.all_common_variables.genotype_read_from_experiment_settings, recording_framerate=framerate, recordingtime=None, resolution=[images.shape[1], images.shape[0]], # preview=None, # preview_resize=self.controller.all_common_variables.observation_resize_variable.get(), pixel_per_mm=float(self.controller.access_subframes( self.controller.current_window).sub_frames.pixel_per_mm_label['text']), model_organism=self.controller.all_common_variables.organism_read_from_experiment_settings, pwm_object=None, time_dependent_stim_file=None, organisms_and_heuristics=organisms_and_heuristics, debug_mode='ON', animal_detection_mode=self.controller.all_common_variables.animal_detection_method_var.get(), display_framerate=playback_speed, simulated_online_analysis=True, undistort_dst=self.controller.all_common_variables.undistort_dst_file, undistort_mtx=self.controller.all_common_variables.undistort_mtx_file, newcameramtx=self.controller.all_common_variables.newcameramtx, ) ''' # KEEP - might be implemented in a future version class FixMetadata(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) # reference the camera and the controller self.camera_class = camera_class self.cam = self.camera_class.cam self.controller = controller self.entered_text = None self.timestamps = None self.framerate = None # can't use a textvariable as we'll use both strings and numbers here! self.species = None self.genotype = None # Here the frame that will hold all the camera controls is being prepared. self.subframe_preexperiment = tk.LabelFrame(self, text='Fix Metadata') self.subframe_preexperiment.grid(row=0, column=0) # construct the window by calling the SubFrames class with whatever we need. 
This instance is then bound to the # variable self.sub_frames to make it's variables etc available self.sub_frames = SubFrames(self.camera_class, self.controller, cam_frame=False, subframe_preexperiment=self.subframe_preexperiment, misc_frame=True, observation_mode=False, distance_configuration=False, model_organism=False, update_metadata=True, exp_ctr_frame=False, VR_arena=False, frame=self, quit_frame=False, display_experiment_settings=True, fix_metadata=True, convert_images_frame=False ) ''' class MultiAnimalTracking(tk.Frame): def __init__(self, parent, controller, camera_class=None): tk.Frame.__init__(self, parent) self.controller = controller # if previous_variables bla # get image path self.data_path = None self.framerate = 0 self.colormap_options = ('Greys_r', 'viridis', 'plasma', 'magma', 'afmhot', 'cool') self.colormap_variable = tk.StringVar() self.colormap_variable.set(self.colormap_options[0]) self.subframe_title = tk.Label( self, text='Multi-Animal Tracking', font='Helvetica 12 bold') self.subframe_title.grid(row=0, column=0, columnspan=5) self.pathbutton = tk.Button(self, text='Select data to analyze', command=self.select_images_multi_animal_tracking) self.pathbutton.grid(row=1, column=0, columnspan=2) self.pathchosen = tk.Label(self, text='no data chosen yet') self.pathchosen.grid(row=2, column=0, columnspan=2) self.experiment_date_n_time_frame = tk.LabelFrame(self, text='Experiment Date/Time') self.experiment_date_n_time_frame.grid(row=3, column=0, sticky='nsew') self.experiment_date_n_time_label = tk.Label( self.experiment_date_n_time_frame, text='') self.experiment_date_n_time_label.grid(row=0, column=0) self.framerate_label_frame = tk.LabelFrame(self, text='Framerate') self.framerate_label_frame.grid(row=4, column=0, sticky='nsew') self.framerate_label_label = tk.Label(self.framerate_label_frame, text='') self.framerate_label_label.grid(row=0, column=0) self.recording_time_frame = tk.LabelFrame(self, text='Recorded Time') self.recording_time_frame.grid(row=5, column=0, sticky='nsew') self.recording_time_label = tk.Label(self.recording_time_frame, text='') self.recording_time_label.grid(row=0, column=0) self.resolution_frame = tk.LabelFrame(self, text='Resolution') self.resolution_frame.grid(row=6, column=0, sticky='nsew') self.resolution_label = tk.Label(self.resolution_frame, text='') self.resolution_label.grid(row=0, column=0) self.species_frame = tk.LabelFrame(self, text='Species') self.species_frame.grid(row=3, column=1, sticky='nsew') self.species_label = tk.Label(self.species_frame, text='') self.species_label.grid(row=0, column=0) self.genotype_frame = tk.LabelFrame(self, text='Exp. 
group') self.genotype_frame.grid(row=4, column=1, sticky='nsew') self.genotype_label = tk.Label(self.genotype_frame, text='') self.genotype_label.grid(row=0, column=0) self.pixel_per_mm_frame = tk.LabelFrame(self, text='Pixel per mm') self.pixel_per_mm_frame.grid(row=6, column=1, sticky='nsew') self.pixel_per_mm_label = tk.Label(self.pixel_per_mm_frame, text='') self.pixel_per_mm_label.grid(row=0, column=0) self.colormap_label = tk.Label(self, text='Select Colormap') self.colormap_label.grid(row=5, column=3) self.colormap_menu = tk.OptionMenu(self, self.colormap_variable, *self.colormap_options) self.colormap_menu.grid(row=5, column=4) self.start_analysis_button = tk.Button(self, text='Press to show \nthe behavior of the animals', command=self.visualize_experiment_func) self.start_analysis_button.grid(row=6, column=3, columnspan=2) self.background_image = None self.experiment_csv = None self.common_str_counted = tk.Label(self, text='') self.common_str_counted.grid(row=5, column=1) # callback every 700 ms self.menu_callback() def menu_callback(self): # call this after every 700ms self.after(700, self.menu_callback) def select_images_multi_animal_tracking(self): if self.data_path is not None: self.data_path = filedialog.askdirectory(initialdir=self.data_path) else: self.data_path = filedialog.askdirectory() os.chdir(self.data_path) self.cam = self.data_path + '/all_images.npy' path_to_show = '...' + self.data_path[-30:] self.pathchosen.configure(text=path_to_show) try: with open((self.data_path + '/experiment_settings.json'), 'r') as file: experiment_settings = json.load(file) try: self.experiment_date_n_time_label.configure(text=experiment_settings['Experiment Date and Time']) self.framerate_label_label.configure(text=repr(experiment_settings['Framerate']) + 'fps') self.framerate = experiment_settings['Framerate'] self.recording_time_label.configure(text=repr(experiment_settings['Recording time']) + 's') self.resolution_label.configure(text=experiment_settings['Resolution']) self.species_label.configure(text=experiment_settings['Model Organism']) if 'Exp. group' in experiment_settings: self.genotype_label.configure(text=experiment_settings['Exp. group']) elif 'Genotype' in experiment_settings: self.genotype_label.configure(text=experiment_settings['Genotype']) self.pixel_per_mm_label.configure( text=experiment_settings['Pixel per mm']) except KeyError: print('experiment_settings.json does not contain ' 'expected Key values!') except FileNotFoundError: tk.messagebox.showerror( 'Experiment_settings.json not found', "Unable to find a 'experiment_settings.json' file " "in the folder you selected. " "\n\n" "Please select a different folder.") def visualize_experiment_func(self): multi_animal_tracking.MultiAnimalTracking(data_path=self.data_path, colormap=self.colormap_variable.get(), recording_framerate=self.framerate, organisms_and_heuristics=organisms_and_heuristics) def place_window(the_window): """ This function places the Window on the far right of the screen. 
    This helps because, when the preview window is opened, it always
    appears in the top left of the screen.
    """
    the_window.update_idletasks()  # this became necessary after installing everything with pip

    width_window = the_window.winfo_width()    # width for the Tk root
    height_window = the_window.winfo_height()  # height for the Tk root

    # get screen width and height
    width_screen = the_window.winfo_screenwidth()    # width of the screen
    height_screen = the_window.winfo_screenheight()  # height of the screen

    # calculate x and y coordinates for the Tk root window
    x_position_window = width_screen - width_window
    y_position_window = 0  # height_screen - height_window

    # Set the dimensions of the window and where it is placed.
    # UNCLEAR why this broke when installing everything with pip and opencv.
    # Try to fix in the future. For now, just remove the placement call.
    the_window.geometry('%dx%d+%d+%d' % (width_window,
                                         height_window + 30,
                                         x_position_window,
                                         y_position_window))


if __name__ == "__main__":
    app = PiVR()
    #print('app.winfo_screenwidth() ' + repr(app.winfo_screenwidth()))
    #screenwidth = app.winfo_screenwidth()
    #screenheight = app.winfo_screenheight()
    if RASPBERRY:
        place_window(app)
    app.mainloop()
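# Worked example of the boxsize heuristic used by the tracking code above
# (numbers taken from the comments in this module; shown for illustration
# only and not executed):
#
#     max_speed_mm_per_s = 2       # larval crawling speed
#     framerate = 30               # frames per second
#     pixel_per_mm = 5
#     max_skeleton_length_mm = 5
#
#     speed_px_per_frame = max_speed_mm_per_s / framerate * pixel_per_mm
#     # -> ~0.33 px/frame
#     boxsize = speed_px_per_frame + max_skeleton_length_mm * pixel_per_mm
#     # -> ~25.3 px
#
# The skeleton-length term dominates, which is exactly why it is added:
# the per-frame displacement alone would make the search box far too small
# for the tracker to recover from a partially detected animal.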