From 8f7611df8870afe50c3e16bb9539e75a90366801 Mon Sep 17 00:00:00 2001
From: Raymond Chia <rqchia@janus0.ihpc.uts.edu.au>
Date: Sat, 18 Nov 2023 00:14:35 +1100
Subject: [PATCH] mvp

---
 3                                       | 855 ++++++++++++++++++++++++
 modules/.datapipeline.py.swo            | Bin 45056 -> 0 bytes
 modules/.digitalsignalprocessing.py.swp | Bin 16384 -> 0 bytes
 modules/.utils.py.swp                   | Bin 16384 -> 0 bytes
 regress_rr.py                           | 319 +++++----
 5 files changed, 1048 insertions(+), 126 deletions(-)
 create mode 100644 3
 delete mode 100644 modules/.datapipeline.py.swo
 delete mode 100644 modules/.digitalsignalprocessing.py.swp
 delete mode 100644 modules/.utils.py.swp

diff --git a/3 b/3
new file mode 100644
index 0000000..c7387fd
--- /dev/null
+++ b/3
@@ -0,0 +1,855 @@
+import glob
+from os import makedirs, mkdir
+from os.path import join, exists
+import pandas as pd
+import numpy as np
+import json
+import ipdb
+import re
+import pickle
+import sys
+import time
+from zipfile import ZipFile
+
+import argparse
+from datetime import datetime, timedelta, timezone
+import pytz
+
+import matplotlib.pyplot as plt
+from functools import partial
+from collections import Counter
+from itertools import repeat, chain, combinations
+from multiprocessing import Pool, cpu_count
+import tensorflow as tf
+
+from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
+from sklearn.preprocessing import PolynomialFeatures, LabelEncoder
+from sklearn.model_selection import KFold, train_test_split
+from sklearn.metrics import accuracy_score
+
+from tsfresh.feature_extraction import extract_features
+from tsfresh.feature_extraction import settings as tsfresh_settings
+from tsfresh.utilities.string_manipulation import get_config_from_string
+
+from modules.datapipeline import get_file_list, load_and_snip, load_data, \
+        load_split_data, load_harness_data
+from modules.digitalsignalprocessing import vectorized_slide_win as vsw
+from modules.digitalsignalprocessing import imu_signal_processing
+from modules.digitalsignalprocessing import do_pad_fft,\
+        pressure_signal_processing, infer_frequency
+from modules.utils import *
+
+from modules.evaluations import Evaluation
+from modules.datapipeline import get_windowed_data, DataSynchronizer,\
+        parallelize_dataframe
+from modules.datapipeline import ProjectFileHandler
+from models.ardregression import ARDRegressionClass
+from models.knn import KNNClass
+from models.svm import SVMClass
+from models.lda import LDAClass
+from models.svr import SVRClass
+from models.logisticregression import LogisticRegressionClass
+from models.linearregression import LinearRegressionClass
+from models.neuralnet import FNN_HyperModel, LSTM_HyperModel, TunerClass,\
+        CNN1D_HyperModel
+from models.ridgeclass import RidgeClass
+from models.resnet import Regressor_RESNET, Classifier_RESNET
+from models.xgboostclass import XGBoostClass
+
+from pprint import PrettyPrinter
+
+from sktime.transformations.panel.rocket import (
+        MiniRocket,
+        MiniRocketMultivariate,
+        MiniRocketMultivariateVariable,
+)
+
+from config import WINDOW_SIZE, WINDOW_SHIFT, IMU_FS, DATA_DIR, BR_FS
+
+IMU_COLS =  ['acc_x', 'acc_y', 'acc_z', 'gyr_x', 'gyr_y', 'gyr_z']
+
+def utc_to_local(utc_dt, tz=None):
+    return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=tz)
+
+def datetime_from_utc_to_local(utc_datetime):
+    now_timestamp = time.time()
+    offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(now_timestamp)
+    return utc_datetime + offset
+
+# Load data
+def load_bioharness_file(f:str, skiprows=0, skipfooter=0, **kwargs):
+    df_list = []
+    method = partial(pd.read_csv, skipinitialspace=True,
+                     skiprows=list(range(1, skiprows+1)),
+                     skipfooter=skipfooter,
+                     header=0,
+                     **kwargs
+                    )
+    df = method(f)
+    if 'Time' not in df.columns.values:
+        df['Time'] = pd.to_datetime(
+            df.rename(columns={'Date':'Day'})[
+                ['Day','Month','Year']]) \
+                + pd.to_timedelta(df['ms'], unit='ms')
+        if pd.isna(df['Time']).any():
+            df['Time'].interpolate(inplace=True)
+        df['Time'] = pd.to_datetime(df['Time'], format="%d/%m/%Y %H:%M:%S.%f")
+        df['Time'] = df['Time'].dt.strftime("%d/%m/%Y %H:%M:%S.%f")
+    return df
+
+def load_bioharness_files(f_list:list, skiprows=0, skipfooter=0, **kwargs):
+    df_list = []
+    method = partial(pd.read_csv, skipinitialspace=True,
+                     skiprows=list(range(1, skiprows+1)),
+                     skipfooter=skipfooter,
+                     header=0, **kwargs)
+    for f in f_list:
+        df_list.append(load_bioharness_file(f))
+
+    df = pd.concat(df_list, ignore_index=True)
+    return df
+
+def bioharness_datetime_to_seconds(val):
+    fmt = "%d/%m/%Y %H:%M:%S.%f" 
+    dstr = datetime.strptime(val, fmt)
+    seconds = dstr.timestamp()
+    return seconds
+
+def load_imu_file(imu_file:str):
+    hdr_file = imu_file.replace('imudata.gz', 'recording.g3')
+
+    df = pd.read_json(imu_file, lines=True, compression='gzip')
+    hdr = pd.read_json(hdr_file, orient='index')
+    hdr = hdr.to_dict().pop(0)
+
+    if df.empty: return df, hdr
+
+    data_df = pd.DataFrame(df['data'].tolist())
+    df = pd.concat([df.drop('data', axis=1), data_df], axis=1)
+
+    iso_tz = hdr['created']
+    tzinfo = pytz.timezone(hdr['timezone'])
+    # adjust for UTC
+    start_time = datetime.fromisoformat(iso_tz[:-1])
+    start_time = utc_to_local(start_time, tz=tzinfo).astimezone(tzinfo)
+
+    na_inds = df.loc[pd.isna(df['accelerometer']), :].index.values
+    df.drop(index=na_inds, inplace=True)
+
+    imu_times = df['timestamp'].values
+    df['timestamp_interp'] = imu_times
+    df['timestamp_interp'] = df['timestamp_interp'].interpolate()
+    imu_times = df['timestamp_interp'].values
+    imu_datetimes = [start_time + timedelta(seconds=val) \
+                     for val in imu_times]
+    imu_s = np.array([time.timestamp() for time in imu_datetimes])
+    df['sec'] = imu_s
+
+    time_check_thold = df['sec'].min() + 3*3600
+    mask = df['sec'] > time_check_thold
+    if np.any(mask):
+        df = df[np.logical_not(mask)]
+
+    return df, hdr
+
+def load_imu_files(f_list:list):
+    data, hdr = [], []
+    tmp = []
+    for f in f_list:
+        tmp.append(load_imu_file(f))
+    for l in tmp:
+        data.append(l[0])
+        hdr.append(l[1])
+    data_df = pd.concat(data, axis=0)
+    return data_df, hdr
+
+def load_e4_file(e4_file:str):
+    ''' First row is the initial time of the session as unix time.
+    Second row is the sample rate in Hz'''
+    zip_file = ZipFile(e4_file)
+    dfs = {csv_file.filename: pd.read_csv(zip_file.open(csv_file.filename)
+                                          ,header=None)
+           for csv_file in zip_file.infolist()
+           if csv_file.filename.endswith('.csv')}
+    bvp = dfs["BVP.csv"]
+    t0 = bvp.iloc[0].values[0]
+    fs = bvp.iloc[1].values[0]
+    nsamples = len(bvp) - 2
+
+    t0_datetime = datetime.utcfromtimestamp(t0)
+    t0_local = datetime_from_utc_to_local(t0_datetime)
+    time = [t0_local.timestamp() + ind*(1/fs) for ind in
+            range(nsamples)]
+    tmp = [np.nan, np.nan]
+    time = tmp + time
+    bvp.rename(columns={0: "bvp"}, inplace=True)
+    bvp['sec'] = np.array(time)
+
+    head = bvp.iloc[[0, 1]]
+    bvp.drop(inplace=True, index=[0, 1])
+
+    hdr = {'start_time': head.iloc[0,0],
+           'fs': head.iloc[0,1]}
+
+    return bvp, hdr
+
+def load_e4_files(f_list:list):
+    tmp = []
+    data = []
+    hdr = []
+    for f in f_list:
+        tmp.append(load_e4_file(f))
+    for d, h in tmp:
+        data.append(d)
+        hdr.append(h)
+    data_df = pd.concat(data, axis=0)
+    return data_df, hdr
+
+# Synchronising data
+def sync_to_ref(df0, df1):
+    dsync0 = DataSynchronizer()
+    dsync1 = DataSynchronizer()
+
+    time0 = df0['sec'].values
+    time1 = df1['sec'].values
+
+    t0 = max((time0[0], time1[0]))
+    t1 = min((time0[-1], time1[-1]))
+    dsync0.set_bounds(time0, t0, t1)
+    dsync1.set_bounds(time1, t0, t1)
+
+    return dsync0.sync_df(df0), dsync1.sync_df(df1)
+
+def pss_br_calculations(win, pss_df=None, br_df=None):
+    n_out = 5
+    if win[-1] == 0: return [None]*n_out
+
+    dsync = DataSynchronizer()
+    pss_fs = BR_FS
+    pss_col = [col for col in pss_df.columns.values if\
+               'breathing' in col.lower()][0]
+    pss_ms = pss_df['ms'].values
+    br_ms  = br_df['ms'].values
+    t0, t1 = pss_ms[win][0], pss_ms[win][-1]
+
+    diff = pss_ms[win][1:] - pss_ms[win][:-1]
+    mask = np.abs(diff/1e3) > 60
+    diff_chk = np.any(mask)
+    if diff_chk: return [None]*n_out
+
+    # Get pressure estimate for window
+    pss_win = pss_df.iloc[win]
+    pss_data = pss_win[pss_col]
+    pss_filt = pressure_signal_processing(pss_data, fs=pss_fs)
+    xf, yf = do_pad_fft(pss_filt, fs=pss_fs)
+    pss_est = xf[yf.argmax()]*60
+
+    # Sync and get summary br output
+    dsync.set_bounds(br_ms, t0, t1)
+    br_win = dsync.sync_df(br_df)
+
+    br_out = np.median(br_win['BR'].values)
+
+    # Get subject and condition
+    sbj_out = pss_win['subject'].values[0]
+    cond_out = pss_win['condition'].values[0]
+    return np.median(pss_win['sec'].values), pss_est, br_out, sbj_out, cond_out
+
+def get_pss_br_estimates(pss_df, br_df, window_size=12, window_shift=1):
+    pss_fs = BR_FS
+    # pss_col = [col for col in pss_df.columns.values if\
+    #            'breathing' in col.lower()][0]
+    pss_ms = pss_df['sec'].values
+    br_ms  = br_df['sec'].values
+
+    inds = np.arange(0, len(pss_ms))
+    vsw_out = vsw(inds, len(inds), sub_window_size=int(window_size*pss_fs),
+                  stride_size=int(window_shift*pss_fs))
+
+    # dsync = DataSynchronizer()
+    pss_est, br_out = [], []
+    cond_out, sbj_out = [], []
+    func = partial(pss_br_calculations, pss_df=pss_df, br_df=br_df)
+    # for i, win in enumerate(vsw_out):
+    #     tmp = func(win)
+
+    with Pool(cpu_count()) as p:
+        tmp = p.map(func, vsw_out)
+
+    time_out, pss_est, br_out, sbj_out, cond_out = zip(*tmp)
+
+    time_array = np.array(time_out)
+    pss_est_array = np.array(pss_est)
+    br_out_array = np.array(br_out)
+    sbj_out_array = np.array(sbj_out)
+    cond_out_array = np.array(cond_out)
+
+    df = pd.DataFrame(
+        np.array(
+            [time_array, sbj_out_array, cond_out_array,
+             pss_est_array, br_out_array]
+        ).T,
+        columns=['ms', 'subject', 'condition', 'pss', 'br'])
+    df.dropna(inplace=True)
+
+    return df
+
+# Multiprocessing task for windowing dataframe
+def imu_df_win_task(w_inds, df, i, cols):
+    time = df['sec'].values
+    if w_inds[-1] == 0: return
+    w_df = df.iloc[w_inds]
+    t0, t1 = time[w_inds][0], time[w_inds][-1]
+    diff = time[w_inds[1:]] - time[w_inds[0:-1]]
+    mask = np.abs(diff)>20
+    diff_chk = np.any(mask)
+    if diff_chk:
+        return
+
+    # sbj = w_df['subject'].values.astype(int)
+    # sbj_mask = np.any((sbj[1:] - sbj[:-1])>0)
+    # if sbj_mask:
+    #     return
+
+    if cols is None:
+        cols = ['acc_x', 'acc_y', 'acc_z',
+                'gyr_x', 'gyr_y', 'gyr_z']
+
+    data = w_df[cols].values
+
+    # DSP
+    sd_data = (data - np.mean(data, axis=0))/np.std(data, axis=0)
+    # ys = cubic_interp(sd_data, BR_FS, FS_RESAMPLE)
+    filt_data = imu_signal_processing(sd_data, IMU_FS)
+    x_out = pd.DataFrame(filt_data,
+                         columns=[
+                             'acc_x', 'acc_y', 'acc_z',
+                             'gyro_x', 'gyro_y', 'gyro_z',
+                         ])
+
+    sm_out = w_df['BR'].values
+    ps_out = w_df['PSS'].values
+
+    x_vec_time = np.median(time[w_inds])
+
+    fs = 1/np.mean(diff)
+    ps_freq = int(get_max_frequency(ps_out, fs=fs))
+
+    y_tmp = np.array([x_vec_time, np.nanmedian(sm_out), ps_freq])
+
+    x_out['sec'] = x_vec_time
+    x_out['id'] = i
+    y_out = pd.DataFrame([y_tmp], columns=['sec', 'br', 'pss'])
+
+    return x_out, y_out
+
+def get_max_frequency(data, fs=IMU_FS):
+    data = pressure_signal_processing(data, fs=fs)
+
+    xf, yf = do_pad_fft(data, fs=fs)
+    max_freq = xf[yf.argmax()]*60
+    return max_freq
+
+def convert_to_float(df):
+    cols = df.columns.values
+    if 'sec' in cols:
+        df['sec'] = df['sec'].astype(float)
+    if 'pss' in cols:
+        df['pss'] = df['pss'].astype(float)
+    if 'br' in cols:
+        df['br'] = df['br'].astype(float)
+    if 'subject' in cols:
+        df['subject'] = df['subject'].astype(float)
+
+def load_and_sync_xsens(subject):
+    # load imu
+    imu_list = get_file_list('imudata.gz', sbj=subject)
+    imu_df_all, imu_hdr_df_all = load_imu_files(imu_list)
+
+    # load bioharness
+    pss_list = get_file_list('*Breathing.csv', sbj=subject)
+    if len(pss_list) == 0:
+        pss_list = get_file_list('BR*.csv', sbj=subject)
+
+    br_list = get_file_list('*Summary*', sbj=subject)
+
+    # load e4 wristband
+    e4_list = get_file_list('*.zip', sbj=subject)
+    bvp_df_all, bvp_hdr = load_e4_files(e4_list)
+    bvp_fs = bvp_hdr[0]['fs']
+
+    xsens_list = []
+    # skip the first and last x minute(s)
+    minutes_to_skip = .5
+    br_skiprows = br_skipfooter = int(minutes_to_skip*60)
+    pss_skiprows = pss_skipfooter = int(minutes_to_skip*60*BR_FS)
+    # load each bioharness file and sync the imu to it
+    for pss_file, br_file in zip(pss_list, br_list):
+        pss_df = load_bioharness_file(pss_file, skiprows=pss_skiprows,
+                                      skipfooter=pss_skipfooter,
+                                      engine='python')
+        pss_time = pss_df['Time'].map(bioharness_datetime_to_seconds).values\
+                .reshape(-1, 1)
+        pss_df['sec'] = pss_time
+
+        br_df = load_bioharness_file(br_file, skiprows=br_skiprows,
+                                     skipfooter=br_skipfooter,
+                                     engine='python')
+        br_time = br_df['Time'].map(bioharness_datetime_to_seconds).values\
+                .reshape(-1, 1)
+        br_df['sec'] = br_time
+
+        # sync
+        br_df, imu_df = sync_to_ref(br_df, imu_df_all.copy())
+        pss_df, _ = sync_to_ref(pss_df, imu_df_all.copy())
+        bvp_df, _ = sync_to_ref(bvp_df_all.copy(), pss_df.copy())
+
+        # extract relevant data
+        acc_data = np.stack(imu_df['accelerometer'].values)
+        gyr_data = np.stack(imu_df['gyroscope'].values)
+        x_time = imu_df['sec'].values.reshape(-1, 1)
+
+        br_col = [col for col in pss_df.columns.values if\
+                  'breathing' in col.lower()][0]
+        pss_data = pss_df[br_col].values
+        pss_data = np.interp(x_time, pss_df['sec'].values, pss_data)\
+                .reshape(-1, 1)
+
+        br_lbl = [col for col in br_df.columns.values if\
+                  'br' in col.lower()][0]
+        br_data = br_df['BR'].values
+        br_data = np.interp(x_time, br_df['sec'].values, br_data)\
+                .reshape(-1, 1)
+
+        bvp_data = bvp_df['bvp'].values
+        bvp_data = np.interp(x_time, bvp_df['sec'].values, bvp_data)\
+                .reshape(-1, 1)
+
+        xsens_data = np.concatenate(
+            (x_time, br_data, pss_data, bvp_data, acc_data, gyr_data),
+            axis=1)
+
+        columns=['sec'   , 'BR'    , 'PSS'   , 'BVP' ,
+                 'acc_x' , 'acc_y' , 'acc_z' ,
+                 'gyr_x' , 'gyr_y' , 'gyr_z' , ]
+        xsens_df_tmp = pd.DataFrame(xsens_data, columns=columns)
+
+        '''
+        print("{:.2f}\t{:.2f}\t{:.2f}".format(br_df.sec.iloc[0],
+                                              pss_df.sec.iloc[0],
+                                              imu_df.sec.iloc[0]))
+        print("{:.2f}\t{:.2f}\t{:.2f}".format(br_df.sec.iloc[-1],
+                                              pss_df.sec.iloc[-1],
+                                              imu_df.sec.iloc[-1]))
+        print(xsens_df_tmp.head())
+        '''
+        xsens_list.append(xsens_df_tmp)
+
+    if len(xsens_list) > 1:
+        xsens_df = pd.concat(xsens_list, axis=0, ignore_index=True)
+        xsens_df.reset_index(drop=True, inplace=True)
+    else:
+        xsens_df = xsens_list[0]
+
+    return xsens_df
+
+
+def load_tsfresh(subject, project_dir,
+                 window_size=12, window_shift=0.2, fs=IMU_FS,
+                 overwrite=False):
+    cols = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']
+    pkl_file = join(project_dir, 'tsfresh.pkl')
+    if exists(pkl_file) and not overwrite:
+        return pd.read_pickle(pkl_file)
+
+    xsens_df = load_and_sync_xsens(subject)
+    x_df, y_df = get_df_windows(xsens_df,
+                                imu_df_win_task,
+                                window_size=window_size,
+                                window_shift=window_shift,
+                                fs=fs,
+                               )
+    x_features_df = extract_features(
+        x_df, column_sort='sec',
+        column_id='id',
+        # default_fc_parameters=tsfresh_settings.MinimalFCParameters(),
+    )
+    x_features_df.fillna(0, inplace=True)
+
+    cols = x_features_df.columns.values
+
+    df_out = pd.concat([y_df, x_features_df], axis=1)
+    df_out.to_pickle(pkl_file)
+    return df_out
+
+def get_activity_log(subject):
+    activity_list = get_file_list('activity*.csv', sbj=subject)
+    activity_dfs = [pd.read_csv(f) for f in activity_list]
+    return pd.concat(activity_dfs, axis=0)
+
+def get_respiration_log(subject):
+    log_list = get_file_list('*.json', sbj=subject)
+    log_dfs = [pd.read_json(f) for f in log_list]
+    return pd.concat(log_dfs, axis=0)
+
+def get_cal_data(event_df, xsens_df):
+    fmt ="%Y-%m-%d %H.%M.%S" 
+    cal_list = []
+    cpms = []
+    start_sec = 0
+    stop_sec = 0
+    for index, row in event_df.iterrows():
+        event = row['eventTag']
+        timestamp = row['timestamp']
+        inhalePeriod = row['inhalePeriod']
+        exhalePeriod = row['exhalePeriod']
+
+        cpm = np.round( 60/(inhalePeriod + exhalePeriod) )
+
+        sec = timestamp.to_pydatetime().timestamp()
+
+        if event == 'Start':
+            start_sec = sec
+            continue
+        elif event == 'Stop':
+            stop_sec = sec
+
+            dsync = DataSynchronizer()
+            dsync.set_bounds(xsens_df['sec'].values, start_sec, stop_sec)
+
+            sync_df = dsync.sync_df(xsens_df.copy())
+            cal_data = {'cpm': cpm, 'data': sync_df}
+            cal_list.append(cal_data)
+
+            assert np.round(sync_df.sec.iloc[0])==np.round(start_sec), \
+            "error with start sync"
+            assert np.round(sync_df.sec.iloc[-1])==np.round(stop_sec), \
+            "error with stop sync"
+
+    return pd.DataFrame(cal_list)
+
+def get_test_data(cal_df, activity_df, xsens_df):
+    fmt = "%d/%m/%Y %H:%M:%S"
+    start_time = cal_df.iloc[-1]['data'].sec.values[-1]
+    data_df = xsens_df[xsens_df.sec > start_time]
+    activity_start = 0
+    activity_end = 0
+
+    activity_list = []
+
+    for index, row in activity_df.iterrows():
+        sec = datetime.strptime(row['Timestamps'], fmt)
+        if row['Event'] == 'start':
+            activity_start = sec
+        elif row['Event'] == 'stop':
+            activity_stop = sec
+
+            dsync = DataSynchronizer()
+            dsync.set_bounds(data_df['sec'].values, activity_start,
+                             activity_stop)
+
+            sync_df = dsync.sync_df(data_df.copy())
+            activity_data = {'activity': row['Activity'], 'data': sync_df}
+            activity_list.append(activity_data)
+
+    return pd.DataFrame(activity_list)
+
+# save evaluation metrics in single file that handles the models for the
+# subject and config
+class EvalHandler():
+    def __init__(self, y_true, y_pred, subject, pfh, mdl_str, overwrite=False):
+        self.subject = subject
+        self.config = pfh.config
+        self.parent_directory = join(DATA_DIR, 'subject_specific')
+        self.fset_id = pfh.fset_id
+        self.mdl_str = mdl_str
+        self.overwrite = overwrite
+
+        self.evals = Evaluation(y_true, y_pred)
+
+        entry = {'subject': self.subject,
+                 'config_id': self.fset_id,
+                 'mdl_str': self.mdl_str,
+                }
+        self.entry = {**entry, **self.config, **self.evals.get_evals()}
+
+        self.eval_history_file = join(self.parent_directory,
+                                      'eval_history.csv')
+        self.eval_hist = self.load_eval_history()
+
+    def load_eval_history(self):
+        if not exists(self.eval_history_file):
+            return None
+        else:
+            return pd.read_csv(self.eval_history_file)
+
+    def update_eval_history(self):
+        eval_hist = self.eval_hist
+        if eval_hist is None:
+            eval_hist = pd.DataFrame([self.entry])
+        else:
+            index_list = eval_hist[
+                (eval_hist['subject'] == self.entry['subject']) &\
+                (eval_hist['config_id'] == self.entry['config_id']) &\
+                (eval_hist['mdl_str'] == self.entry['mdl_str'])\
+            ].index.tolist()
+            if len(index_list) == 0:
+                print("adding new entry")
+                eval_hist = eval_hist._append(self.entry, ignore_index=True)
+            elif index_list is not None and self.overwrite:
+                eval_hist.loc[index_list[0]] = self.entry
+        self.eval_hist = eval_hist
+
+    def save_eval_history(self):
+        self.eval_hist.to_csv(self.eval_history_file, index=False)
+
+# Train IMU - RR models across subjects
+def imu_rr_model(subject,
+                 window_size=12,
+                 window_shift=0.2,
+                 lbl_str='pss',
+                 mdl_str='knn',
+                 overwrite=False,
+                 feature_method='tsfresh',
+                 train_len:int=3,
+                 test_standing=False,
+                ):
+    # window_size, window_shift, intra, inter
+    cal_str = 'cpm'
+    fs = IMU_FS
+    tmp = []
+    imu_cols = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']
+
+    do_minirocket = False
+    use_tsfresh   = False
+    overwrite_tsfresh = True
+    train_size = int(train_len)
+
+    if feature_method == 'tsfresh':
+        use_tsfresh = True
+    elif feature_method == 'minirocket':
+        do_minirocket = True
+
+    config = {'window_size'   : window_size,
+              'window_shift'  : window_shift,
+              'lbl_str'       : lbl_str,
+              'do_minirocket' : do_minirocket,
+              'use_tsfresh'   : use_tsfresh,
+              'train_len'     : train_len,
+             }
+
+    pfh = ProjectFileHandler(config)
+    pfh.set_home_directory(join(DATA_DIR, 'subject_specific', subject))
+    pfh.set_parent_directory('imu_rr')
+    id_check = pfh.get_id_from_config()
+    if id_check is None:
+        pfh.set_project_directory()
+        pfh.save_metafile()
+    else:
+        pfh.set_id(int(id_check))
+        pfh.set_project_directory()
+        print('Using pre-set data id: ', pfh.fset_id)
+    project_dir = pfh.project_directory
+
+    marker = f'imu_rr_{subject}_id{pfh.fset_id}'
+
+    if not use_tsfresh:
+        xsens_df = load_and_sync_xsens(subject)
+    else:
+        xsens_df = load_tsfresh(subject,
+                                project_dir,
+                                window_size=window_size,
+                                window_shift=window_shift,
+                                fs=IMU_FS,
+                                overwrite=overwrite_tsfresh)
+
+    activity_df = get_activity_log(subject)
+    event_df = get_respiration_log(subject)
+
+    cal_df = get_cal_data(event_df, xsens_df)
+
+    # include standing or not
+    test_df = get_test_data(cal_df, activity_df, xsens_df)
+    # NOTE(review): removed leftover ipdb.set_trace() debug breakpoint
+    
+    for combi in combinations(cal_df[cal_str].values, train_len):
+        config[cal_str] = combi
+        train_df = pd.concat(
+            [cal_df[cal_df[cal_str] == cpm]['data'] for cpm in combi],
+            axis=0
+        )
+
+        assert np.isin(train_df.index.values, test_df.index.values).any()==False,\
+                "overlapping test and train data"
+
+        print("train")
+        print(train_df.shape)
+        print("test")
+        print(test_df.shape)
+
+        if do_minirocket:
+            x_train_df, y_train_df = get_df_windows(train_df,
+                                                    imu_df_win_task,
+                                                    window_size=window_size,
+                                                    window_shift=window_shift,
+                                                    fs=fs,
+                                                   )
+            x_test_df, y_test_df = get_df_windows(test_df, 
+                                                  imu_df_win_task,
+                                                  window_size=window_size,
+                                                  window_shift=window_shift,
+                                                  fs=fs,
+                                                 )
+
+            x_train = make_windows_from_id(x_train_df, imu_cols)
+            x_test  = make_windows_from_id(x_test_df, imu_cols)
+            y_train = y_train_df[lbl_str].values.reshape(-1, 1)
+            y_test  = y_test_df[lbl_str].values.reshape(-1, 1)
+
+            print("minirocket transforming...")
+            x_train = np.swapaxes(x_train, 1, 2)
+            x_test = np.swapaxes(x_test, 1, 2)
+            minirocket = MiniRocketMultivariate()
+            x_train    = minirocket.fit_transform(x_train)
+            x_test     = minirocket.transform(x_test)
+        elif use_tsfresh:
+            x_train = train_df.iloc[:, 3:].values
+            y_train = train_df[lbl_str].values.reshape(-1, 1)
+            x_test  = test_df.iloc[:, 3:].values
+            y_test  = test_df[lbl_str].values.reshape(-1, 1)
+        else:
+            x_train_df, y_train_df = get_df_windows(train_df,
+                                                    imu_df_win_task,
+                                                    window_size=window_size,
+                                                    window_shift=window_shift,
+                                                    fs=fs,
+                                                   )
+            x_test_df, y_test_df = get_df_windows(test_df, 
+                                                  imu_df_win_task,
+                                                  window_size=window_size,
+                                                  window_shift=window_shift,
+                                                  fs=fs,
+                                                 )
+
+            x_train = make_windows_from_id(x_train_df, imu_cols)
+            x_test  = make_windows_from_id(x_test_df, imu_cols)
+            y_train = y_train_df[lbl_str].values.reshape(-1, 1)
+            y_test  = y_test_df[lbl_str].values.reshape(-1, 1)
+
+        transforms, model = model_training(mdl_str, x_train, y_train,
+                                           marker, validation_data=None,
+                                           overwrite=overwrite,
+                                           is_regression=True,
+                                           project_directory=project_dir,
+                                           window_size=int(window_size*fs),
+                                           extra_train=200,
+                                          )
+
+        if transforms is not None:
+            x_test = transforms.transform(x_test)
+
+        preds = model.predict(x_test)
+
+        eval_handle = EvalHandler(y_test.flatten(), preds.flatten(), subject,
+                                  pfh, mdl_str, overwrite=overwrite)
+        eval_handle.update_eval_history()
+        eval_handle.save_eval_history()
+
+        pp = PrettyPrinter()
+        pp.pprint(eval_handle.load_eval_history())
+
+        fig, ax = plt.subplots()
+        fig_title = ' '.join([mdl_str, subject, str(combi)])
+        ax.plot(y_test)
+        ax.plot(preds)
+        ax.set_title(fig_title)
+        ax.legend([lbl_str, 'pred'])
+        fig_dir = join(project_dir, 'figures',)
+        if not exists(fig_dir): mkdir(fig_dir)
+        fig.savefig(join(fig_dir, mdl_str))
+
+def arg_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-m", '--model', type=str,
+                        default='linreg',
+                        choices=['linreg', 'ard', 'xgboost', 'knn',
+                                 'svr', 'cnn1d', 'fnn', 'lstm', 'ridge',
+                                 'elastic'],
+                       )
+    parser.add_argument("-s", '--subject', type=int,
+                        default=2,
+                        choices=list(range(1,3))+[-1],
+                       )
+    parser.add_argument("-f", '--feature_method', type=str,
+                        default='minirocket',
+                        choices=['tsfresh', 'minirocket', 'None']
+                       )
+    parser.add_argument("-o", '--overwrite', type=int,
+                        default=0,
+                       )
+    parser.add_argument('--win_size', type=int,
+                        default=12,
+                       )
+    parser.add_argument('--win_shift', type=float,
+                        default=0.2,
+                       )
+    parser.add_argument('-l', '--lbl_str', type=str,
+                        default='pss',
+                       )
+    parser.add_argument('-tl', '--train_len', type=int,
+                        default=3,
+                        help='minutes of data to use for calibration'
+                       )
+    args = parser.parse_args()
+    return args
+
+if __name__ == '__main__':
+    # choose either intra or inter subject features to use for model training
+    # '[!M]*'
+    np.random.seed(100)
+    n_subject_max = 2
+    args = arg_parser()
+
+    mdl_str        = args.model
+    subject        = args.subject
+    feature_method = args.feature_method
+    window_size    = args.win_size
+    window_shift   = args.win_shift
+    lbl_str        = args.lbl_str
+    train_len      = args.train_len
+    overwrite      = args.overwrite
+
+    print(args)
+    assert train_len>0,"--train_len must be an integer greater than 0"
+
+    subject_pre_string = 'Pilot'
+
+    if subject > 0:
+        subject = subject_pre_string+str(subject).zfill(2)
+
+        imu_rr_model(subject, window_size=window_size, window_shift=window_shift,
+                     lbl_str=lbl_str, mdl_str=mdl_str, overwrite=overwrite,
+                     feature_method=feature_method, train_len=train_len
+                    )
+    else:
+        subjects = [subject_pre_string+str(i).zfill(2) for i in \
+                    range(1, n_subject_max+1)]
+        imu_rr_func = partial(imu_rr_model,
+                              window_size=window_size,
+                              window_shift=window_shift,
+                              lbl_str=lbl_str,
+                              mdl_str=mdl_str,
+                              overwrite=overwrite,
+                              feature_method=feature_method,
+                              train_len=train_len
+                             )
+
+        if mdl_str in ['fnn', 'lstm', 'cnn1d', 'elastic', 'ard', 'xgboost']:
+            for subject in subjects:
+                imu_rr_func(subject)
+        else:
+            ncpu = min(len(subjects), cpu_count())
+            with Pool(ncpu) as p:
+                p.map(imu_rr_func, subjects)
+
+    print(args)
diff --git a/modules/.datapipeline.py.swo b/modules/.datapipeline.py.swo
deleted file mode 100644
index e1d1686c91104d8ab1be7c0477fcbb672876d299..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 45056
zcmeI53!L0nb>9brAtX5A5u6Xfq<<uqnUz+fUC9P_!y4pQtYCrh$`E^{46`%;*&S(K
z%*<-F7LIKiAkZ{m-bn)?Eu_T-LU}iYS0RDmHb5~i6B5FsAs9Xwn%JR0==XQ-z5mC|
ztR$PHH0jPqKkdx_-h1x3=RVK5=idMF>{It2h|Vw17Wh1{P<X|iPmOPT@8e(g#5WcS
z{b$rxn^gsz{Bxw*SsTojo2$KAd2Ki-$Mv;xb!|M&uQ-Ccd;RW_xHcT@uJ)VNUHy2_
zYxb+dX1BAeR&DKWck64dc(A)(9aekIUfgPS;&N|eoZQxvEeUK%;B+OhHf-!V|E$8^
zJ#*I8w{Lr7^pFQ%b-Jmy;%!M_O9ERG*pk4O1hyowC4nsoY)N2C0{;voFznsGa6RR?
zzc0%({NH<x{QYtNyW{`%N5aqWzkB}g7e>PG<9`qQ-z$6uA)hn-^JV|{10&)0_0JFa
zzf(TLkp6!D`8EFUjU(Yd=ASqG-_MPN-`hVA^!E7&;dclR<r^*kT|aQ2wx=^1ey0!b
z`tUD|<oCBeJfs_y&)@j)s!um6pa12<mwfoBe7^0&ulC`i^7)nzkA3*4eD3h!K^~*>
z@yWLSY)N2C0$UQ;lE9V(wj{76fh`GaNnlF?TN2ok!2hEXsMZUGN5I>6q}Z|lAN1SL
z{#2px8Sr6n19&pn4!-h}g~H3h1~?bYfV06F;D4S)IQS5FIrukV6|4ZZ9t&rHdx5V%
zpiuY#_<e9KxD@;g@co}C6ut*O32p|5z(c@;!R;6zz6NdszYDGhi{PQ)V;C<!3~mN%
z;3(J!o&X*MK8Nyu8+Z%24m=Y)9h?V>;L+gY7!ck9o&}DBE5TF1Ht=<f54VF4f|r7q
zfTx35up68M9uCd~{{utD+rUkr2c81vz=OeE7(wm?UjQEluLaKn74SIlGvI9S7Z@8p
z2;L5U3;ZTn20Otcz^&LWJ^}s|{5x<2OoN{RX9KItQ?<8<&-Q44yVvaxqgHh}ZVjSF
zzuS&#gJXq)LiOp#!?k`Vsy7PubJBlngSggh4HjC>!LZb?c2?rj?984$)6+B2YFw?y
z{e>rWJMnZvpx%fUqF%k+C$K_hrAD(AcdG4pCK?=V;v2F)SeToMI{u5LtH+I~X{=Tp
z&59D0263w~6NU8C^A<N{Ufe25xmWGSonfWk?2~oz;`u{|uB>jfyPbM;5e_bg4qbI{
z@z9~my6bUt&FWgTE4mohlS7A!MovVKP3(y4mp&87n#0O&@8YuW%KiIxi%d$S*@$|r
z>afx6x1)uHB5o~f$BR+7pM4s+$9szN6G}0Nd$7Ol(4lPw<AMJ+Va&(eokqN;nOixn
zDk{Tn#Z^HB7pl^P3yXBDS&zGwhAPr=hosZOTa>|CyIt*Xq>;v>44Z9p_(|f7M`^a#
zl2FSjiK13gsWdyyVWnbigBDS1RR;sBP}kJseu<~Xx^LUZE5m-Z*_ny>5)XzJ<i4t)
zonE=x?^icU?vZQ8KFLV1sHDx@#V=QTy|_~^9bXdd!?|F^&xGrzk3buVJX1;*H9OH@
zP#*MJ&0*=dZ|0tdz&5-r9i9QkKSf@M7FX9AjaIy9$vPEFZe5E`TJN?igJE?TFU-wO
z7bLiWi|wLWE?FVT8LPr5V9)%I@_?bEa;(}~iwC9UjfEluL2(9VY38+f;b4Ew_j(^6
zmgx4iZl_iqmg<d)1`MLFbh`bRTC2y$eHxdEi$J1Qmw^J7D}zq6S2AKQsX2`eoJ_Yz
z7qyzbD6ZC4Z9s^It8qjL8Qe%3Q3n~`l^nt=@zBN*<HQkOX?2$whn9~l4Az$Aq!y?%
z%0&u{dez}DW=I@%qZ&ilFpi8`w_(|0-j4jo7_}q|QKPuyg#8{2`_~~y*D><FHy{55
za;i7w6n9-^LQ?v35+Q01qE2^ceRAIF(j>N8?RVnApdz{@Uz(XHa4`=jGbs)Iv~hYS
za#CGbnk_R5^WWi-{^I9?dgJu#-;enDs-N~1a=P`r@K39{Rr3|!SnffUPfoshwX4w;
z{eYXajCp!Mzu8$SM~l6<)@*D<>#I#VND?91=&lv}F@2Uc%}5j1HP#eF%(s}$NciBW
zR9u|fL$Bx0-eMFFYp%n<%H?$1fptT*Ic3qM^K|Offi=QLpP9mPQf}@usp)BKvi6*B
z$65JIY{!M|kvK>gaIMXc`8K#(&!98TL>gqnue1XPu1@E#Q5ZC@jTe${P7o)_$&WxT
zhGU^IKD_y6KT{AJ)W3CL8*=FOnuh12X0N_n9x#|esuq{Rkac<k;`XF?3FR;;H3Fp&
zrAK2LmW0}BTsz7PqSiH?jRuEf;Wj$auJ#|es<MA^@v2J}E0??Fl*>Fz452WiH0zY7
z+OMsa8q?*Kes`@`nqHclcaz)pNb{reiH7Z7AY`dX-H(}K+C41~``Z?mW_SI|!}i~{
zz-3ZQA~RhURJiQ3|GTq-Rx^n49*huUDK{S+tc)tBPeaLz!$E@{zv^Zpk}|<n{4*!g
zOx)vxxHE7u|3Sx?__$iEMx6#T=^FfKJ<MEY3}-otF6?$&akUeb7!RszEluCHz$ix-
z$BbQVoWuv}9<1S6>{A*VX5`Cb)P5A>Suz$Z%$D~|o>`OO&6Z~)lXX@C%Lu^MNB^*+
zo%H`dK(D_INdFJ}yY~B3I^<UPmISsWuqA;l32aGVO9ERG*pk4O1hyowC4nsoY)N2C
z0zYC2$len^M+V(axxUu!*|wPWU>eO8c21`Y(*N0-E_}o9{|Enk?f<_C^g$i`IJiH!
zi+%n(!G8zu11|)}z~$hH;8EZX_V{lB{}vnrbx;Erfpft3+3UXr+zf66H-IX5JlF%i
z#(w|D!3%+W03HUu%|8D}!8^fQz>C0@-~gBdf6qSu_raIIE#M8{Ch$V=B(MwoB={tI
z{qF{^0?z}#2Abd(!Nb5`v)?aYfH#9zg5L%=f~SLNPynA|pa1>fec%OP8I-~Oz+WQF
z{|KHBo(Zl1)1U<81Mm&@_qF%`X7FP0Y|sF6U>2MOZfB4GL*PB&)!=$?5R}0Kz)t}A
z0K5ad4m=CUpT7eRfEge^fbUYLR<EHYhUSppc+yk$$F!4&RgG`6g%A;gT75VhJ&yn9
z(hYEa5K%j$%Y)LeLW*5r&KX#hRB6T|u+bRkZjXl5qp_&w&FvyXO6>v<miSR=R|iMu
zFyK1NuaZyXJxL^Hl3d_k`(g>5a<#KjnzsEw<2t&)XQfmlJMNB!U2}&6J2}c?#A4`a
zh_SS5&*5liG?(PBSd>(SnG|n!HYaH0*&KALol=rudOk{MB@ffw=2D>~(O^fSx7djQ
zXAdt?kZc?$*AOwfAToK3#Yjj+kAtR3Ac<pptJ=1O3aqcMvVr7SUs!Nqw5MrmE?apK
zpCpik_B|EYc-@Xg&WY^QZPU{4a@nNKZ=`)@B8|7v7H%PU1?0<}w6`V4;CM4lN}9+p
z6Lqvg#@$P_ah;~jG=JZw(`;vRt=gW`UtI&3J)CyhG)2O^)w?~<m(E&8lDn(>d^EOT
zRP)+nw~c$gRdG`oM736TAddz1`i62a=sQ!sq`}TXG}!3W6z0tHlA6r)?RdD_ttZ=;
z=8eE^)^c~PQy+LIA1e?$b&jaM(A2}uw8;CNz4nIO6!bu!?UDUfOOqdvvL{$cOBRqe
ziA`c!5pY&WBL;%(e>Yp*+L9xzlBzSz+fV50$IeWr%a*>4thD2lFR|mz7nd9}1JcB@
zxe~Ic<NYlzt<sEAK0|Qzcyq8Y>$VT|EW>}j-|exRC&vqwMqifd%|>H>{%{&!4}$!B
zx$uz6mpV7<B_&l6ZI^daIjs&v1($>$RasqSSrx0gGKD_PGPu@}DTM^QJz7*Nm)A+X
zyWSa8+r3t-Mo+cNIi(*zV=eB~*siM9`dvJA)Qco5c5pN9BiV+8{Kp-x627yc>N}ge
zK#8%{ez(Iub?OEso_zzW9otiWs{FVvs`j)U=T?i}@y;B)T>DZk*{2rI`y~}bPajV+
zc=~Ax8tAQ6R_lG|#5Kc?UlZrNg`%~;;#daShIBrEC(A{<_|Pm^&iu!B#KQk&+}7&N
z+ORZT?$O#a<GYJuV)kXpWR>=#;I8PMSC;50)^CccG8`YQf6d|SsNW1OIAQrm2{S$f
zh7iWZM7vbJ4~!~Hj>ut{)Q*11@1%!9gsja|(#cQ0T<n@AK9ibYXYfE`_cP;gB?9sB
zTHG5(mpu8>OZxq8Kb;5^I8He@QD%v0r?K5*S9X9K(n4|N+Gfw!k7x}08!6F{oHBZe
zdMCYtqGsBQWGs1v%CvCB(PyN|WSv9P3c)|H5f#e>7UcpEa!z^4P|DUW-OuG5Qlun}
zAmN)33<*NvG#5$at_2sAU#5f#om@m;yv+P$^+1clOR%-kuJ%f-qy`KR@wJ(e94Cn?
z8|3Q|m8`vTHI)Io$#IWO^9XmkIBMfUXM7kh8m~`9G_-)UIr<qEU*5HLht{<Jz$KN7
zo_zUYDkh5|+SbzPR7=IhAvudi(R)&e0ezA<#FzUH(oGv}!J_b=Z=?q|dhw)I8uEb#
z=^wkdyg8+$HD+oesc|)foS$&QKw2fqBk>lbGr>pM`6XM!8u5#D{>ySdZ88=2@;;EF
zI=;L;Nj~z0J_#+F>?9tv1~aFif-=m=!g+XGuggh7`v1ex58vzc|7EW$eg$3st>C%f
zIiLnE1p7b)K7*eBHgG+tgU5q&z~7_ae+GO6{43A^`@v=4KH%Qqv*`G50?z|=Py^Ea
zi{P8+`tJd+1uq6K0@r{Aa1Qt@bo~#2H-Z&#HFzL61N<HO{b#_Z!E3-lpfdo|padQY
zZbiqJ?*Gd`zW;Zi&wm8m07~Fz!JnbWzY)9v{05M(|JmRU^!Lw#&w#gs7l1?H0`N%i
zO?3D7fOiAw_4~mN@O||4cY*7`<=}~62e>!*7<&4f!An66>;ZG&F6zVTB<Svw|EiYt
zNnES(dy?<3Tyx`kNnUbg{f7gn^S#!O%rujIOfvixS1U)lO=jw~W%<j?vCpO4VI%Hz
zstCDkEtWFW`r0{i*)S0Zc=udmlK*himFHLlX`E}j{SBw7MC;Ars(sbCS_rXe<Qxr1
zNnH?StPq|<<?VPKWHrt3EU>0Qvl1SPqMUnG!gP4cJPfHakBljGC^^~%v6;L3*i>e+
zuz8a;l=oFfwwg%of$Auyk*mD<U@<USg-MK&I?Sb37>OrU<c`XPa#&uKL#SrYwB({t
zm;Tb|Dq|;3?{Cgog6SNyREn%B2{)P@2kGpae-7chQIaIBl|o4;mE*!Ix7wmA%(ulv
zXp<YGpW$eeYV8FQ=05&;TJ3|SW@;)@)G*TwhHLc=r+&+Dz$(Xd9Bb^^uwZ;>qW-4M
zLN1$4)OICf)fRWw+A;3F-ej~ikAcc75Xy4`SF&Ig6gKt|Y2Ri=+J7D7OA*#CODs%c
z{m^+94lIPhWu&!FA1Z`=#%Jw>mf5??_H&4uSDM~w1+2#{7Pzb3wLZr!=B9CFmBaO5
z+Wb}(TWngYQ}9^Ufvo1BO9{1pQW7VrQ&9@MTRbT`WXPY*WHL!lS}MBZ4fW#TC>2IR
zm@ihP($1vIF$gFVQ=$Ctk=&F&&1@;B5N6|#se+TUO;|Xsde3IyvY#-;F&B0Fb&ZQF
zdyAQ>r|CBbm9=3lV~cTTt6Zy(dxgM_KV*`aI`mvtge$6j3}R}OnK@5~q|p1jS|wX!
zw4P+MCsu(K4gz69)9Tf9e9J?<JvY7Dk~Xw;j?oyO3(g3XVK;PtH<^U^>Cm5Vf$8&5
zuI7MIqm&NA=~23w`n0p9%QLmqk5}U3!D*WiV>%Ei<$g^h7S&z%umcF1tX;O@+}^Z?
zp|m}Oa;QBew2cLxWIe#+6=ay1<HZs2^iVO2<{G~9#O!sCDwcgO!^kzzawIJRS%-&O
zU#DknL`|P@ijQ*M(yFzQm50?^Mv4h(cT94;>WYq(%gO!kD>b!q`astz-P%UI5sRlC
z{t9z=IvguO7}(R(AQO^U%S;8YK4+fynOR?jl+(QEjGoIJWX+hyo*4lAfgmZ4&K8mi
z+agtKHO*J2iWZh(r;M1)k&|K;y!6}}mZlY!hP05htr}n1ddmtsvn+OxZLXxzKzboc
z_bf7j*0pniVk1km(j@r;^~kZ>ICQOk@lbsyW+%^PaylXv;P`wsO4OEkDr7-f>Z7LM
z%FP-&dL+jK7CC}yLHw6ZZ_maFSd_FOiSOiBN@uMnyJI;qP&%g7ev(Hbl<{z1C>}@)
zyHH%5oh$lff(BgK^UZ$R#+K@b@YY5$25bD)86XY@aD<t*=B7=|t^MW-!d>pxH)f*c
zuqf8aHyuKD`i?)&=gyBVI^n3jmh@jZOFGuNgN0lI@ZZzoOGV0BZIkcm!e+ar@;3KC
z40C(fU&vY7L-_axoI*C)e)B<&zd76g*ZKd~dHp}!ulGfC{I`J)*aOZ0_X1x=$A33?
zCirEr58NB*{Qs@!_OAiY1Sh~gaDO0u|5acEJP8~CWpF=mD?0t#!Eb^U@EC9&_yPL-
zE#UXTv%oKap8_98hkrE~f&q9MI0t+cUH)dU4vv9y!F|BT(C7aEJQq}f_VIrnNY8&5
z_yKzT=fNL>-vKv(>w$FsUEpiz^{)dhpmY4Q;DO+;(d)ko-UoD!{|MLx{sx`?R`4G{
z4{QftMxXx_csBS|a1h)Vd<dEUF)-Or*6-7&>UioYwucqQ$5W`DHrlD-SClGje_5h(
zW=KrOj!fKi11^cF^(XzqHRD7X$UUKTrPM5Dl3M0&oe<WJdh$>t)7Gyt=7NYiPUIkI
ziigWGm%?v$F+W|V(Q_KPAVT_#kA<V;^S^gW4el-<$*$=OM#=w|k3NaJ_5j`(v*UiR
z%W;2><9Vib&t*6KMbG6$8ACA`9r`_qq(l(k*;L?!a@!w54t%&Uk~OxS*x;1hb-Pb+
zm%(tAzxC?Ib?Kphp9chpylv|0-Baz|Q}vPX>IQ+DW5xx2adm^TIFT|hq!lZ|mgS~N
z4~U`76N~Ll(_T&B<fKefMGMt{u1hD<IZD5vI9do?oy?q_wy~s%6Z>MiMt)DMU@{Bc
zEUE2!V{%V(iy7Yoys8nn$ZRSpPk$^^^o_~Tg-4=i?6hSU#nk##_Lexe@UbU8w`}qd
zX%c}<$xgyR5^E9(wNu*#u~8<=ghJd7h-2GU!OX+<%yxv`uKw8UDbre}(4!(#u!QUk
z9it*4FjwM9sq+Q$F>F*NwUOP7vt&VMTM_08qpH-$)kwxV;-c7`owhrvbBvBiTi+z2
z(6h3>If>FYAzl(@o7Dx&oBz_?!>s*Np~hIInwE-}9Am4sc$n))7PX>?4zi<^su#Ho
zLbJ%E_bvhj(bgvvWJ1)X;z3G?2--c3hZ?Ibw%J^!Q>l!y7)bf%tjx15Nx9~eSfzzj
zc`ZbV2d0o@jL?~fN;PU@b-^PB*7Tv4bt5W4MqilWnum;KHMA=wS4WvVgk;*gsQaH2
zO~fj_|1{VNTern@Uc*b(`3xb*E^$3YP~Ea28c|n@rqVK`rh1;T$;O)?Di=H=sx;k6
z4bl6g-(^JQOLe!6&>0rfsUY)kQWa0+JAIh1;ba$NpEwS+TL}E>FAYGJRb{1>QO-!?
zHzv}dN(ug%?16YO%a%3PvP~|MM)p=9PWGpF1QirTA6lUGs2D$`;h>a>;uSqb$~@hq
zeg?I;6I--$yLWD8Z^J6ZPtATW1`^4Gv5uAtQKgO!5h21z%g;XEgvY#@abyrwdN-JP
z_RPYP-+56KF}xXD6IeP)Hzda$2&y0fhtFv==ET;RUht#T)Z;(;UfoP;EGu4<t>c`{
z#%eq>EEmLH7>pA!kF{B!EO$i})gSJx8fz$`nhTv&D<Q&2&1BPVUPCPOwI#Q+SXs>P
z!76$9_(;n9wD#=+N8W%J{QsZl_5W+U#u@JadkZ@ME5Osiqrn~M`5y!S9<;y|_&PfN
zpMw{J7lBpq2=Ha}`!9hHf>(my25aCN5P>hF=f58OI;emW_#`_1hrl{G2Bi0Y2mSsJ
z!MlKT{~N)1;0NgUw}6j;mx4Z60=m!dFVXGa4w_&V{2jXdT|j#NuYm>dbKnu+PW1V|
z1^*4a7+ec>f;-UXwWt3ka3eS$JOtc=PX99SQgAJJ95@Gj51sz=;C<k=;5FbmU<u5F
zzemP*fd2xX4@~BXh94?4SnM@?x-k~b*|ZkvI6)sPgy{O4gwhI@ioaxf4Q8|A)ycPO
z+_!XjFs>Qri99v#51YJ>QDiPhjqSUyi`A9paLD(Rl9mt$S>l-Ulw&xrnx)UwNVCOC
zv@<J&HzWHpOJ!2zaU}s$hN%H4W$DD~dar_l%q9kg*1t*GL_f%di#tajEg@OWrFkO(
z7o61rF#GB<ld*s?2OVe7Y02C@xxgB-PG-q*;VC;-k<Q=)zut>lk+8nxj5VgA<erS1
zZyfQ|a^vX~Bolr7Z)#TOLF}fyC5b3&L2+hR9!hODg5GR17_lihlKy|KSFt^WG$NYW
zQOuE1v$w)n?O-ESWVZ|J_g)ud4P|{>2w5n-kqm9p2x6kLhtgXZdu7B)ED=f4Oimfo
zO@6DEP20-wTEAJF<tNz{PRMPKs`VplZ1}0bR~@`42^e1M9Wr_w!)r~R*K*yHwUM&8
zWkc1mtH;HRC|%~+f?3pLKR1;^(o~3i@whQ0VI}gPykRQ|wno0W0JEIP3OFv}RPi`9
zsifwT?A^6gw7)ht1eNA?my#&DGYW6@^tbsLusfNgRl$<rO&J|;1WM;_uhO^4TqmhU
z74z+zCO9<*6epFI@*ewy0|EK<nWAObbWY69`9J&E&NM9R!o~gylZ{144+Dru2!vJZ
z^8UtGTm9)o$Mp1){^TYIJ#gn#QZ-G_ppgpyf@trK$ISAtpltNf2F~Iht|469I~%QX
zaU3>LW#`;TlI8Tkc6<t!g#S{Ce!9_vT)RsuCP|YOLFQOQrz%?u&bFJ&RJo#z*W}lr
z^IK{ERF*aw#dvY(k}CS>Of>I>I%=x+S>+pmt7%)iW@bGxJ3cM?h3d&>igfln#cUBG
zT2IZ*5T`R|gwzs1u=s~(Mls+0azUZ|;h|_8WHhJB?wP?q5VDLXQ8$CVGjf}Wp6P=%
z^~p8?Trjj4K5hHqM{l<erIfhtp4mQ|@@`t~vq^twqkT#&SrsO|^AyvRx?s{rQaY1Y
zQ(<}Ns&CTHScq|oKH{ROS>5dp5^_kpM@lT;H`3>!>85FasBV!XocVFe#-E8hvYKBi
z{?p$3N+o;udQh6~ZuXxv;Sb$=0;#**cclOSG&<;q(MNUvU&HI8Uq#pd&*1ssFgPFF
zj;{X^@F(E);ML&SU_Uq$d;*>SL*P}Q4Hm(f;H&8Rp98mn_kioc5zqwZgEPQ;(fw}%
z4KNKJ1a3w5*LnZn1HTJ4z%POsa0mMTpM%eX_k!O8H-IJZc<{5}d)NWK2;K%>3|<7D
z26ljHptJrrgARBscno+TxD%VeC%_+p=Yv&n8Q23J0v-&$fqmdl!COHa$Uk5Q_zL!c
zH-OiJe+@1HI@iA&=xqNVVJG-?a2zawUEoac7uW}Mp8qD$1r4wl=sdtf!PluDtE0qT
zkpH4FP#<%o?{epO$&86;@9Ra}^tjtUU3rb|nXo#k${wWqidVC{V%1}DB?OKQr*7H3
zZLL#X#`zO(MmrPCHcw41=JNmHl8LBU_cyPKyMq49eu5{PZSCpMY#|#O6l+gv0jHwI
z3X#&HD7HPTFEtVx@-T1_bJUW!9m4HK3>_!phS^-KRDf-X1OfKP1Sm>rc2KjZIcK{)
z>H1fCokfr;YhSYa)+&&81y5X;u;JqHOr}A1d_&*DZPGgWRl~Qqc`SR$x_HXQBKgQp
zRqDLvj$Tl-9Xn*-*-@4>yggMDsrJ#D`0}xCl~*6tI9B<g;$;3$7!M6=G~<3ZxYUK5
zZM_{@Rz*ZyEri^YAZwAM3*I)DN3iAM_G<BD86$6-36<{Aku#B!(Mdn(7jFITYj?VQ
zEsda6;%yd%kkD8k%kwnEouAo#?~&F`AF&-c8Zd`P*XT*Svw(Z?IqNs<&PUt)`Xca@
za3YS38`ttP2CvX@d=lO_5$*CvXKbNF{f?Djtcnxz2^HP9oXcHN<Ue<G>?tu3iFO23
zQK|HlLLH)lZwmo4QANFCeLdf$*VjqBUTqz9wK3juhuxkISC7>1xKY!@lk)GsCEOyX
z5Qx*`sEC?%%bxRcPQde6gcf$qWmT_{mYBR}B((E;+7Rix!S&gZN+syp-$?Ly^IJvd
z)V`$W#EP8M{YYwG%+WkKla1!wUT?|gGLf0(pQldl`3Je&@<BeY{KJKd_Z{3<xp@DT
zV;M>E`M8;8Zmc9jg+>uLDTW7bRLk=nYFm|^5Akyh1l+fz2;;7)jED0&Qx3DvJ@g7U
z0$s{{aT$K8EqA&;;=^SCF}Gppgny_C^QhL;k_?^xrur2I^iEXu`LEe!`1q}ok@2#f
zo31g3+WZuXSm2=skW1b5*^$qkcVRGsWa`Y1*7ejgPR!MLr-*y&M`<s<X_{BUTR`+4
zjS5H29TOwnEQ~H8a-&sUN&7GEwp<p&DR~&P)9zGZnWN(+UZEmW3Kw(?{BWxFNQilO
z5G&mEk%{NXTKUECcsVRL3WDXn!P^H`OPOnklp=)7c+jf0m+RGNV_xj_<N8`ns~b#)
zcmew}`)N0$keMiH@kCUg^0F2>ZhW$@6<z<&sK%ulm44jj5QpA<G;V~m#E5LjH7ou;
zF74bIjm+5-%OvyPB)a2<!16lTcGn$S?z`t}u-a@47p%vG=X#Rb_9oVwEdhqy{@(3Z
z2CXKyR#1)8p3N{>tEQ<w-?NXa9VN2e{6q?6K3)g~lmw2`ZjNWg8u&c9Ty0Pb0t`H4
z$B`a=A<2mQn7ZugGg9D-j?cibx{|J6wuc6`9yeE3?X10jF%~nHy;{a<Ne7j3cYDyi
z(<YF~%8vEZ`_-?yt@L6lL{!Jyf%3W$Y`Jn&?Ql?*d2_(@*e(%eBW;W$(HN+8#vEq=
z-UpP6h{ZI?e}^v(YyW14kwc}8IfEt7`F8cVu~5}TB$*Vf{4`qO&N9HC9!~>BcGA%E
zfiEZlmhp@a%S`Y)Bv=(8^25imI6CUGuB`OqxW2)a1$kN{`CILnJTt6?f~mGWxlq><
zKxV8nS!IVN#H?PJ<dPRAp$9l;f1!Hw6A!2sN}Wor#fA*BT1XsY)B4@Ldu%nB20|_U
zk<T%-X7##I^WhmSd~#$0h&vihLKvBrGEo*D!%&`bv9NbfGEp78RMW*3&7N~e<GJ+z
z2cb%S%j^Ho@M`53(D&Z~{u6jPI01AAfP4VX0$)Pk*ExWffK{*po&xp)y$|rAKxYE(
zMF0O9_&f;qft&fh0)8IMgWaGE3g8FW0zLuW0p19n1D*z+0JeeeU>Eo(co|p*X9JxR
z(0u^U1qZ=n!CBxoYyj^DzXeu613VE-f$yUKe;m9L{0iuT4mb#Qff;Z%xE*_d?gD%T
z_$6=wxEGKw!8^e#!8$kqwu5hC1NaztKX@Pb9iTe`o&du6fd}#ZOV|Rw2wno#Kpp75
zfC$J=@Br{l>;iubWHWdp(0c%ng2Ui^a0a*o+rYcP3&3xH%fN$y)o0#B;C@AXSm8+J
z4DOQlnyG;^7^Gv4A|@aICO;aMyZ^%epNV$tIJ&NlBA3dx9>VcXySzn99JH57S6R5n
z_L9Hw+P|4}k*SQsV>orM6=WkQJli=k&5y)aw+***40k9|#a1%;(v7PNIWjw)U;L@*
zXg%cLTsZlB;CMzxl!Bd_QHXQ4lZhdiJ$!DiK%B&$&5V5suaHi^bAHIPS5N^rW!FM8
z$rdtq8K#e!wAR|CIn(0g)++tY+fY*5iCF@&b|*^@SGjwv#7+1lP078&%)JhUMkM<a
zx5K&C>E2qF)+g^(Ip6%zOg^^QO52EgpP6e@S#w3sPBVAd|J+vDrpuHHVNs(#lI<AE
zI!rU3T5Gvg5c~Y4WV%>Qcj73a4sSREkbg?}368SONA#fLmHeeG7cZaX%N~81<I7%s
z;mwcHx%#q~FXvrn+!#SltDA3sDUhGQ4|{FKkG7X<<T$Kzu@ufaa{Pd)f!CbUZEG>l
zr`^;=WG>D^9oEa4C$7tkrO8EiKipDsEGDn!S>r`Gfv|prm_v<1+Cj;lm^TLGCgEa1
zn@IN1w(#6R`+_mfe+p3e*iM|=m?_$km)%oWM^l&0PaT+_S}adByrnDOD^HtHLMb6b
zhbop6Cu(J!stb36?0&~!G!<^yVv=r{c;p2t8;)AgJuJCrR(+q|yHuPnE-cizjgFNp
zf9S$u`|z}K!BC`5koxP}#RDU4KM}YzgtTI)E&n>nxTYHxLNwNDwGj$Dv9kRC&{~s9
zqs5-Q$zt8c-ZXmTD>=e7)t;&!oVsl4z|`V3+Y+kQQ1FNi^*ClCKWQsGd6soMHJ0l?
z+oF-2cmJd0e0622U74!yn!2nqb)Yh}IF@bYo@Pta)0;JCbSQ4uRD0J{J(}8Io;pyT
zTBP@7_A69f`lx|x6jdreWFa;0bB4z4j`i|pS)H!U{LbD0>L_t0(M5qw_Eh(Q3YK`w
zCi@cEkjA;3L3kw9q56q)uiGr4hNXiz+1N{liwCaUTw0qH@4We4=cd~jj@(h_=jl0h
zqmjSG?WsC@IdYBL-E4T)&c`c-F<bTGnq7L++sM#A>~j2(IZlID7<a-gFP>jpE8rfN
zS|v1mC0kYQW!M*Aa^Y3KFcT#g;LSwAI?0x$i^4^js1r&$ILd43`kk`Z7;!$b7iy2F
zl=-;rF7qADspT7#Ph)Esdk3+52`_F>nK$@)618S;qulI<eEfxYfiLajgzwx4=m|5D
zKXz~Up_fI5@7ga4-?@;dx1o)~4zGZ>qE)d_cnZ385)a@?jGv4PY^-&-ze|TMeH3q#
zaG}Fz)Y1Q)DD>S^Z1dtiz2O_1gT0^6$G8GIocZsqvDviN89EYOY<lcQ1}b(B32Qrl
z0VdzLe8t!tiKyGlRE%T~YGN#R+9ZkRe!7^0jkHYuecgdXU?%PyYjTb1Ow>M#S(iV4
zKT4Np+RY`f$IF$mj<AJM2y!qhT7z?2%68ZY&v_y0Q8SjhyK)Ju*F%_pU#g0eKEoT-
zF%%kkU@QD~1BtDv0wJ0}uhkv4n#<+hhQ3+RTSL?TQF9A_=Jo%M*BN#H->1Q+!0W+r
z@MKU14*_R_`+&Ej<KG1O;L$*L0Dcuc{;$AC!K=X#908XAoeR)A0KS1P|6jpd!CQdd
z0ibgMyMgWnxG(qyy8Q=%&ItS}cp7*(_%U!l@L6>H_k){&bbq}M;3+`w1NbgF{<pxN
zfDeP4z&eOQ9n678fv-^b4}cc}ogLT*9tj=*?n2N16wvzs-U;3TUIktXmVw>}@W0Xd
z^)7(7ffL{oa2EJJI{x>-r@=eGb3hH82a4d);C6KUo52P+4xR*l8hjhQ{^LM8{~N%K
z;0EvnAf5kS;1+cJmjT@yD4qZ3fYo6}=hy6k?GsP_sCz8#x2nC0O!YIIv6tSt%|GOt
zNnVwyTf1fKvr))S6!>|NabGvyWGQU&%GqobCMvu`n{@M%8`o%@x*k`TVbMOx&8+_Z
z$+UA0U2x;FQ`z!yIE8gOuNoi8>oigoB&BR#aE=WFg{h`1w=#?}d5piS+)_(955>tR
z&xTA)@3PNF%Vr8W?VapkKXuy4#d5UWRUDq#LC{EyJ=wiR+i7d2hig<VC4(w=3tN{@
z^#{@MA8#%7vtmI{MO8&{mS$gv#+VJkz6$eiJa!6Ot_t#1tpl$9E%la(XPT#)$0okx
zJuwSdg6+|M?vB$t-g)1;cGa|xHkg+s%YL%$HH`MmPT2@cuo#XsA+v5vG<(OO6LRi_
z%dU8sT;1zhldQdgC2T&W*DeKvmv1a1^<jeYJ6&mwFwbi5T~lDpm2eu`U7a8dOMx`P
zQ405fi=r{R<&v2&a*s%rscS1Mbysb*&<(D2XQ*v{F=DFR5p7J})71%I91E^gY*TWH
zMF|HpYvD7|%0}P5;mbUF(RB!N>6rp+N7?jap<E=S%Z@@%e!R}tHZ{cs(&MSjt1b1O
z&ZTsV&WX)A3Lonq*|p2`gqybWOuHj2V@!|lw1qtM_q6iK7c-mBNHJYrw#*I2DDA`B
z0?ouZQgxxVl)7vFrjSBljwj(|-m2iGbtxyKr*5d{AvT(H2Z9k_HxN~7t4CA$h9|C;
z^d=Q^_D{0%?)-{&0S3FwZkNB!NK0_ffR1J8Tt@cMtd3@++Qtw=nZ0Hv6GyKS;b}@I
z|6UW=cI6A^lG*u;bBZh-O;%(k0aKJ&mzL6dD31-+Y0+jcTi`o8<JDeFh6r2IM3R)8
z_l1WlaT_kNyVSxgBn-mHcqSq>gd~Cmu6!A(EH_LW$?c3xy@;#$*^tyekBwy(;v$jX
z*x}<wMQipk53tPC;9`(ASpb(pqN0xbQkIcxaZZw=Q9NO)=b)ki?7EO1NoOACZQhoF
zGMPX&=usFrui&7`(0QK)s(E|dNs|wjk3a+OHCG<-vwS4aN2n)BwW4KtDZ{e2BQGDx
zUAmVH@D@KQhzjFpD(*%_vaDj<t3SL|eZCaz>pRWNJ)JgVr46QUt+`5byJUZ}%?P|8
zcC@B<rhX`j94L{E;=716Lt+*b9zL=-n|D>lebF~@W+hahv}K2-fosUfO6fcYviI%D
zY#^jFvYX7D@g0kliNvvx&Sl9wwvd^7vU8eCZjI4)thvaJc|Nu3J7qmT`Ma#<Rb{mE
zDpU7hVQIFEd3?5Pm&6t%gQTwPm)4c1vDQ<<kvZEJc=NTHOJl{S9K8O)Y^F}eBwmR-
zX>GgTMAN!#lIZeHJWqOafh*qK5IQwn?^d$9cBX<)RU@N&dU=k=Qrn6pQ=xA6#qCWk
zr290ZRMlaVO}Tu(RbeN8GV3vQZfi9*X4=h@VXww&%D0|054RUIDi^n5<d)T=4T}43
z!w!&LS*BkFF{Uq6@jco+O^RTYLQ@Q#BDide?Xj!>XCj+=Lfi@4VhQny>s6f$noy$u
E1K=Ub761SM

diff --git a/modules/.digitalsignalprocessing.py.swp b/modules/.digitalsignalprocessing.py.swp
deleted file mode 100644
index b0450c45748586ac7d937004bbbaa7146ae127fe..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 16384
zcmeHOOKc=Z8SVs@O-KR}aDgDGjRo#8?e>gUR$yR|mfcvpo3#_x-Ymi~O-*;rxZ<Af
zc2~DOW0D}DM2c9raL56agj<9Jkv7~wNE?9=LIQ*o2yg?D3xeg61CQ*6uO2-w&p2Vl
z1?ZN)nd$l;_5a^r{iy18v-;qf74lKHYGJ$6vU)$hz}%}xp1SXO%Sw0rEl#E2@a<t5
z<)Z5Ht;BcpOt>t_U78=Xc}zQ8NYeOW=4WDorku{FOe8#|8IPlRpN0#aILJdL76RVp
z84U$*M>K?q&xGJn+f8~0J(n#d10@4*%)lMi@x_`t)MxHKM&A9d3vX;vYLpC=43rF%
z43rF%43rF%43rF%4E)b9kR`WTUqm)Hn~XB^ebd<Y>t@?G?Zx!I(7SHrp^^V}%>J5@
zGt<6d3=8|$jQk-ZzcMEOr;&f&$lq@QF6{qd<dKm-GA94~nEju|<Zm%{cbV_ArlIg}
zn#-?}fs%oefs%oefs%oefs%oefs%oefs%oef&WSdXrROXvz+MS`F}D0fAyV~^%8Ie
z_zutmjsr)4pT5Jgo&_EOz5rYV9t7?KuHRu<*MOISUjt79=YhL{W5C~Tx2#uyp94<<
zJ)jHJfm?t-+-6z70$u{X1v~{j3M>I120jG5@^;I*3|s;p10Ds=0`~v}I12poR?GSl
zAb=E@1Fqg;Sw8{34}1@J2FL&bd>l9i{Pw72y#Ra#xCE>L9{^r{n`Qk7_#yCZ;OoFw
zfro$^@E+i1;3nYrM=a|(-~wO+w*$8USKn$`F9P2Ho&X*P0^lt03E;iJYgl*q4e$ft
zS>O`zWq<=V@IK%g)-HYrya+r4JPqsvp9AW^$AAw4M}fbfuBx`O%*1TnMbaPv&FBXI
zNX;Sj&4HxWmyEVo`|b1EqHn@N-Xfedv_~&w(R{5=nkl2(eVsz1S|@%SWjxB6lC|QL
zaEI*Dkno7GDDN=53Bqg{Tcxf84tT5O(KJN>QR42iG!|aSw~dl=Bh_WcP{F6R?)NQ)
zCU_U`nrzX8*)oS(MX4aP!p2p?icF<e*N!t-))rbq?aj{462Xu_K;krDDe+m9rHEaI
z$Wj*hjAXPIA}PUA&IF+eUR>!C7+L{q8Dmcwd(udS>X4Qwl1*8br;&0fbJjr^0ye6f
zGLK@fHmF*^jTaiqkYJM{SlN=|c~kTG+Gb@T$dt}#1=N_R)$5xkebW~lOr`z+Lrv;$
z@6j|M2^FHBdW)wbdlQ-ad(g@=<f_6my9hw+?0eWVjLpr-KH5UpaE5?gnui(T=&{||
zn{|l5yF^ShX2ZB$@9>CsXsC;j=FucaX0#OrkRz(wauj1;ucd-f!)&M#QAua5mX69x
zXrO92P)3X94nJ(X$>fTioUwgoR~%wj9Z(d=af`iK3@ee*C`0JQPY*@QkwaByX_S>i
z8VIkIGI)#po`+!_hq-)%%9djp9l>KuG%&8zxC>$#n+g;%B|5Wm!CPJ%8+I}`lWSLw
z8e}Fru~(h-%0_*4VPR2Mr6+}pZgpXm%*l~Ko5C(wsBBcVrVhHR^AC9>66!O%>LTsc
zxpjOvIN*aM=26yI#(Q2&8?3{if5fd~r#L!wD#L_NIB<|a=8%0=0Wx8W;$4=8G-+(J
zG-9DA_&%E#S;_;ZeyfrMENHB!xe3U)L?j|Jl{Vx$$epEvwTHbqig6j{h`K|DdTcD!
z7BNBx-8PnrN=%4ZHH{AVX<D?)MMHi#mQ*o=IgdCF^N!czVaC#0ppGGrM+gDwH}-fG
z#Cs-vynV+SLxWNIQ3D?*Xnl%z{W$5_x*VN&7w_azkV@P7jO>sRe<~uBydVGQafZQz
zp+Up88zI622e&+FhKrL#qi%XC%$}@KEM%HCzV*TDkUlATfI$MiW}blyuSqckah|fn
zc+bx<bhdbVOZ{lUUY)2L>QkpXW@Q*PVISgR$wW~t%4{lwSXAW0G1;m=hit%~j%O3r
z;3#hE<HRA&te2olGI*RWaJo$LtT78GvpU5Y(u7Gr%ZVO`eP}D0I&2IBWPwDz9Tgn{
zLAy?Gs2O53SKTF?1aO6vwHo8+Fjo5`-cWJFC6w{1627U@722bvn_zk)C6jKZ)Jlpb
z)=CN{vOb+&X)-P2{}`<(_sD^9*zsU=M>*BwMV=~)1L;O6g_kLeLNbw1wEugf6$Vo>
zRb;l(xFavgE#4-)lf-F8R?e(?=kHy6z&n3p{a%NxocPRO*P5Vz;^awOY#pL6x(-=h
zllmuC9ykZ<Po0NslGR1@vVQvhbEh1FpQrA>=&hakEDVaB(`S}(8D07Gi9$<uqYNzV
zPMEumq*nxv8z@3WaW7S<ZR6&s&D<8FxcOv#-56KM)Z4;sD6S`9K<$bx?qzX@3f#6Q
zxW)Iv7&ls8hjuMPj=#HT9Q$z?;yrKNmWaZsxlaqYkBO#YO87kKxwv~MRPYmnM&Ef1
zt3-hcvrrqqs12*?cg9_PA?o+NMI&7DaM7g^?txf0i6h*<a~d8rEjmHjcjIuEAs}2N
zJ(;kWuC330xT@-F5UxMWN6Z<_<-Gtn+`~u8ecTERd%+zLOOrT+X3?>3s^Na}WZvZd
zT7s)2vkb$SCZVLLW0Vw42`&X_z+Vy+ZRj}(<1FM&yfZI<DRGz;a-IZDOV0nVVZQtX
z=GAikUp)W*8Rq%l1@?elpbp##yox#g72s<?3uplIz#lQUzYIJFbb$&W=lPcbImf>T
zxC?k4{agor0elmX{XPzqTggDlK*>PKK*>PKK*>PKK*_-W4Fd$5Sq3rFEcs8dm^SIB
zBr`;=Y>i1a>Ucndrx=?q6>>Sq#+0F0{gI2Vlhv!E>S}qXaF2B-EX8F^NY`fleTHaX
zBQo>kVMc~MePdle%(1N6RO?xd(cL1_>E@7No};H+js7pat~rD3`Vk@5(TS)%mb$h%
zw5J2Yxj8c%#=?0@JLWlqR2y?HnciJ=+i@H$>-n=-kJnQ?1F?Ar)6873(`p8Nie;D!
z$EjLb9)u)QQ4{N;;)BxZnYVPK*<x5D*Ux2S9ETVV=I8_C8wh<e2Y6nFoILa~h($Ud
z-I`by*j~fxGSWG&!yi>B_V8%t6qgS^@a(KXmZ}403XQ?{PEJFf_0F+f7DBn|E^6)3
zQb8h@7i=s^oK)1x<zDP~nrO2$4*F0w-Vs~zo;{n!eAdCo)JR81uUf8`>z!?udAoRa
z5PK#D(XYB>s93KZ>b+5`B1cOmWsZ0(EU>P8l;Daiu)7uVNn)p1Qnk?nL)kJfrC<ub
vD}xZtqZKMJzGx3J<rVdbFxBVyUwKWd4d>Dq76_ysmTdxeO+B8hU^V(*lPiJ$

diff --git a/modules/.utils.py.swp b/modules/.utils.py.swp
deleted file mode 100644
index 3c3989aba08152442eb3211073976dc6e7063f06..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 16384
zcmeHNO^oD56?T9SAOw;PCs3ryE>U}yUhm9C5y_yINcJaLWoOvT><`ckrrIv|)Q;PB
zT<+QFY_gH~l?dX%PXae2Kmi1S%>{u#LU2e9$pHixMBsoFvX@AJA}+A-y{fX?o}Nj<
z9*~f1%{OjWy?TH3>UGs?59ZGtUt)K9^A4^X9p~HMJ1u^YU4G*JryM7LFj$wII$r%c
z$CI&|_vCsOc;iBOA{={sJUiy1VR$gh({myy)Ipw0ejpbrlQ}PBnj8ptd~lS8<5;MJ
z<3h&D%O<mY+LqP>tp~2g12;IgA6n2$ec#?&+0A=SUk#IXOzVNx1FZ*I540X=J<xif
z^+4-^)&u{i9w@SFokyYj8?64Zy<fBK{%?Evwtcj%|BCJZh;4svTmQdoe`?!rvjR#v
zZ?x_2xA(7X>$lDAt@S|bfz|`92U-ub9%wz#dZ6_{>w(q-tp{2Uv>vz$5Ae`&Zo#<A
zbl%4A|K<7r&+l}c9|MmAUj;r1B)}QqG_VJF;T?|i6c7OdSO9JU{(6Js{2I6fDBv#O
z<+nS|?||<E7l97~Hv@Zs=dX91PXnI<9sqj4Ex;>pbDX~ee*=CF{0#Ue@CD!$unKs<
zmA5+1i@>jdF9M$c<^UJC7I^+Gj`L+815N_B0<T=>IL`u40uKQb;3Tjg*aO@IJojeD
zc?|eCa3^r(TG#;m7Wg{wIpDKE3akKk0q+I&0oMaBzX?7Bz64wZ9swQz7Jw@#c>D>t
z1Uv~m3`BqcJ^=g!1(`npKLx%9JOMlkTm*)|y}$wBT>#D9OJyNtw-{jdQiF4>$7E8t
zVbt%a@qk{%=&X60JRS?xpI@7EO411kK@<;SKS*P>R&{Kdj*u68TTd8yVV-6QcV&{r
zJP^H8`B)g1QJRAtMB~37OI0j7nj<vwGoJI2AIhKr>kE&Vj#4n1hNAFeJ`hF|sBTzR
z8d#F*c}fv!U9~(I=LKmtap#;+M4Bi3kaIs0ycp-gUx!i{sfoLc#H%9jbq0B-%aS6^
zN<!7UV{Xye$5sS}C5pV6b7p<XauJJ7o)pZoRmPfJ6SE*qLP;;XsLGvcf_G-NWZ~{T
zos4)9tc#Eh<8;6>UKAov_8MbFti)n9Xvzl$_B$u$p=0O7g8qL<|G&LsW=zBq#)2Qb
ztogBC52X7$yE0qR<9Fw{6TkkgOKZfk^z2kWKg!dQPYSqoE#N_IjP*-7*}AQKAkzjp
z%>s<8I9j_*2a!aKVoQe>1uBDnkr$`uMefdNV=;qkn0?J#Q&3<d7UJ<JQ8x4|&6{!7
z)UpaN&8m8W62!KD$KvahQH`WPZHmxK!32l-2<8YWpO&kQC@fbQr$K+Q5tQW`Hg09O
zwtU2;Ofa;|Ce>%Xpy5)x(hYA2DO7}avDSSwQ2YkM!|PbVIOc_zw)q7g#KP6)PuXMi
z^udc{93unFPcsQAv+ZiAuGdo;IkeD9U5p3ZDf7A^+m<L6<l*%+Rf2&7OV}vQ#mr*F
znnI=#SXPQrT$Wa|SPpWY<83q2)5heMe_kde5jWkgKf%}}^F|_+JaKhPm+>vBdIuJa
z#rxPPz9E>QU}?mPb-}<QOwTi}n2s+ar{qWJ%@Bytr`T`{ITDcErE$uz)!OJ_Q0qJs
zKF@PLfonj~B-oAHR-GG}X)nZr427@cL!xK2@YTAEiv5w5ToU4rBxf~k(qk;hyAF|(
z%FuIDoyWI8s4z_;(Vyl_KQ$UBVlI-QaOVvP**v#n(A2f+ojtyM<U?ou)%%VgJJn^8
z>K$J??H^mkGNK3yawKzMbyU`mu-v#3Q(*KinH>M<(Jlh#q{xv%Moh&rl*zE1tuzzn
z&BR7n#KM!wCL&1Cno1OnbOT=`0g3gvDkhnL!G$%Z>S{zz)w(Tus&-KCHj|pBKAXA$
z1E5+Yi<(-<q;?Wxf&LTp9eOkg(vWhao~=%&LvqS+t+FfyuHeCX=?l<RDH3MliyzZy
z{Yz4;I?ZH8vFuS+LBP7Mz9A2HnfY|6`n0fSeWg2~s2->vv{1{!#yoXW*3=6adPH)_
z<S0w?g4O4>dN7e<!US7GZ8in!l1$`E8f=JSX&e`Flio)y#2LCF$(dl?oo=W8uF*+t
zP%L{@bm+HR@iHq<MC|2SVyUoe&9E5{FVM;u`vajoT9(PsAMr$Hl!dWpC<jqo7`x4v
zt06VAOGf3UMj@RC^}$tsr({$n+N#m2gST+8-|)7E17y@9h`GYos&Vm$@4biG4pW;E
zX^AmO2?EKIt%xBSRGRxMM^~4RVyWs8GLoWfYK#`(SID5o;izd@7CXt3;ZQWxTA>!B
z6wMi3hnJTZj`;UsHRelXPeWcJ#yO8`;*KpZ*J!)!#OkS~M#HIbB62OE`>_VASSAA5
z*ce=!rW3l|Is)xRI~}#wWpO&ho>L-eX=l~P+MZV%tv2)0+pA|*cHkcJvj4=9yJ!2=
z=BVsnJ+n01zmZfCcmMM8Y(LM#S;g;OIkH{n%31Vcw-?HxEO@NsFyS%Y1rHD<lyhy|
zY$96I9N#CQPuW<fa*9-ZoowTSERe|z+uKBxgX5HkK2I<di^h<vjTn=Yc-rIs%xW+@
zq(Z2|R~c4bb5NFxSq)F7@gzw{lE=r)!2!9io>(2nno@`a@gHF)s+U?+wj&ROun2!y
zShgkOQ8uBbB(qB}rG!zem!_RdX)l6vf-1^oM@efZ_0qV@f^3WwF;1*O5<8wE)th6p
zieVU(cj){-!P)jfoOkK`U;h678=UWd2s{R?0Ce8JjI;gIzy)9#csKAO&i3B`J`daj
z%mY_&rhf+b3Gf*p1Onha!0&Oc|1R(y;39AY*b7_({1IpQ7l2E^kAMs~1S|mm#5w+1
z;2GdyU<J4lxQw&>OTaIIZvhViX8{+u4)_b~`UUVb@F+lbW<Yzj9%wz#dZ6_{>w*8N
z2WVev%L`%T&{KJ#B1J_V)E~{Mxl_bjoi&_=aHr}ITE*712-_?N5|dpS%sZtFRK_$L
zs>1d=y3V)MS(_FDt#CSkML6U_V1LBb##Aaj>E3?59%oBvRBO|Nv^=WGRl@WKp^PN^
zP?=^VSteeQQgIlSyZ31ojcg2Q)(ohofZCZU<&Jo!&!f7q>KCaGm!Y?7UAL{mW{YCj
zKI+Pt-uuy!5>HxjoNKUIVkwUJF&IZNA9h)W!x$>GwlSj(D-C4W$}!DQA6adw7H&?`
zCWl2PvvA-koZLVj2z&aj3xPjSSd!QbyA2h+5jL20EzvON@dSnJUhf!o(!!J{8YI|w
z7QQL!yMsxuGg2t(&KBjHf?mfU?V-9PXq|<qr6vI{+<FJZ4HMMhD4fMs?T3Jd%;s=Y
zt}JI}hw(;yldLWj>>iStyT2G^bH*zS7oJFthd6PgaT0XPpCC*v;Jm~(`y_V9ZnbN6
z=jQbOe7ZD<kG<6TIErFnd~a9~R}#VV0_Orde_Anps<88?OKheU2OLvCXn3PJ&kP*<
zsFC#7j4Ae~3A8rOIj#;~S~Oj@fB(jLo)2x!XCFJBm?Dm?LZUQ_MX2jNRFtFxHaj$b
zn?9c4nN(v`N5Mf<*Il&oJ{2>tBv6#FMP%dgs4lp&syFW)s_D_J2W=o0XnjV3`N>BY
zVx7{jbl;Jpuk=X@@to^~ILfLqvuW`4a4>b{iU|p(%QW}P(uoyUJ_gay(PZ}Dtmv|L
zXFe5Q-MxnVwi{>S^WSnWd1>aa!JEDYkY-dtWq!9Y5~WxK5;A6KOL=clnNa5=RoO(o
zYK);DVeIc}=%X(j;}vg1Oq9D#fawC)*POgc2KY~-g4pjkbViy<1s7_nEmnGBf*+~#
zFNx3&f9aIIoF<|%fR<>DZt8<tBWQN6*tuqXCHO^RT9qsZlGl1{O3`1KGOt<QAhEx3
W(VR^Of=kJhBzd~W-Rt}Kw)zkMh8BeY

diff --git a/regress_rr.py b/regress_rr.py
index 111a9f9..38d987f 100644
--- a/regress_rr.py
+++ b/regress_rr.py
@@ -18,7 +18,7 @@ import pytz
 import matplotlib.pyplot as plt
 from functools import partial
 from collections import Counter
-from itertools import repeat, chain
+from itertools import repeat, chain, combinations
 from multiprocessing import Pool, cpu_count
 import tensorflow as tf
 
@@ -380,9 +380,9 @@ def load_and_sync_xsens(subject):
 
     xsens_list = []
     # skip the first and last x minute(s)
-    minutes_to_skip = 2
-    br_skiprows = br_skipfooter = minutes_to_skip*60
-    pss_skiprows = pss_skipfooter = minutes_to_skip*60*BR_FS
+    minutes_to_skip = .5
+    br_skiprows = br_skipfooter = int(minutes_to_skip*60)
+    pss_skiprows = pss_skipfooter = int(minutes_to_skip*60*BR_FS)
     # load each bioharness file and sync the imu to it
     for pss_file, br_file in zip(pss_list, br_list):
         pss_df = load_bioharness_file(pss_file, skiprows=pss_skiprows,
@@ -492,6 +492,70 @@ def get_respiration_log(subject):
     log_dfs = [pd.read_json(f) for f in log_list]
     return pd.concat(log_dfs, axis=0)
 
+def get_cal_data(event_df, xsens_df):
+    fmt ="%Y-%m-%d %H.%M.%S" 
+    cal_list = []
+    cpms = []
+    start_sec = 0
+    stop_sec = 0
+    for index, row in event_df.iterrows():
+        event = row['eventTag']
+        timestamp = row['timestamp']
+        inhalePeriod = row['inhalePeriod']
+        exhalePeriod = row['exhalePeriod']
+
+        cpm = np.round( 60/(inhalePeriod + exhalePeriod) )
+
+        sec = timestamp.to_pydatetime().timestamp()
+
+        if event == 'Start':
+            start_sec = sec
+            continue
+        elif event == 'Stop':
+            stop_sec = sec
+
+            dsync = DataSynchronizer()
+            dsync.set_bounds(xsens_df['sec'].values, start_sec, stop_sec)
+
+            sync_df = dsync.sync_df(xsens_df.copy())
+            cal_data = {'cpm': cpm, 'data': sync_df}
+            cal_list.append(cal_data)
+
+            assert np.round(sync_df.sec.iloc[0])==np.round(start_sec), \
+            "error with start sync"
+            assert np.round(sync_df.sec.iloc[-1])==np.round(stop_sec), \
+            "error with stop sync"
+
+    return pd.DataFrame(cal_list)
+
+def get_test_data(cal_df, activity_df, xsens_df, test_standing):
+    fmt = "%d/%m/%Y %H:%M:%S"
+    start_time = cal_df.iloc[-1]['data'].sec.values[-1]
+    data_df = xsens_df[xsens_df.sec > start_time]
+    activity_start = 0
+    activity_end = 0
+
+    activity_list = []
+
+    for index, row in activity_df.iterrows():
+        sec = datetime.strptime(row['Timestamps'], fmt).timestamp()
+        if not test_standing and row['Activity'] == 'standing':
+            continue
+        if row['Event'] == 'start':
+            activity_start = sec
+        elif row['Event'] == 'end':
+            activity_stop = sec
+
+            dsync = DataSynchronizer()
+            dsync.set_bounds(data_df['sec'].values, activity_start,
+                             activity_stop)
+
+            sync_df = dsync.sync_df(data_df.copy())
+            activity_data = {'activity': row['Activity'], 'data': sync_df}
+            activity_list.append(activity_data)
+
+    return pd.DataFrame(activity_list)
+
 # save evaluation metrics in single file that handles the models for the
 # subject and config
 class EvalHandler():
@@ -529,7 +593,8 @@ class EvalHandler():
             index_list = eval_hist[
                 (eval_hist['subject'] == self.entry['subject']) &\
                 (eval_hist['config_id'] == self.entry['config_id']) &\
-                (eval_hist['mdl_str'] == self.entry['mdl_str'])\
+                (eval_hist['mdl_str'] == self.entry['mdl_str']) &\
+                (eval_hist['cpm'] == self.entry['cpm'])\
             ].index.tolist()
             if len(index_list) == 0:
                 print("adding new entry")
@@ -549,11 +614,20 @@ def imu_rr_model(subject,
                  mdl_str='knn',
                  overwrite=False,
                  feature_method='tsfresh',
-                 train_len=1
+                 train_len:int=3,
+                 test_standing=False,
                 ):
     # window_size, window_shift, intra, inter
+    cal_str = 'cpm'
     fs = IMU_FS
     tmp = []
+    imu_cols = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']
+    bvp_cols= ['bvp']
+
+    # TODO: 
+        # implement and input args config by data cols
+        # implement and input args config with test_standing
+    data_cols = imu_cols + bvp_cols
 
     do_minirocket = False
     use_tsfresh   = False
@@ -571,6 +645,8 @@ def imu_rr_model(subject,
               'do_minirocket' : do_minirocket,
               'use_tsfresh'   : use_tsfresh,
               'train_len'     : train_len,
+              'data_cols'     : data_cols,
+              'test_standing' : test_standing,
              }
 
     pfh = ProjectFileHandler(config)
@@ -586,8 +662,6 @@ def imu_rr_model(subject,
         print('Using pre-set data id: ', pfh.fset_id)
     project_dir = pfh.project_directory
 
-    marker = f'imu_rr_{subject}_id{pfh.fset_id}'
-
     if not use_tsfresh:
         xsens_df = load_and_sync_xsens(subject)
     else:
@@ -598,115 +672,115 @@ def imu_rr_model(subject,
                                 fs=IMU_FS,
                                 overwrite=overwrite_tsfresh)
 
-    large_win_size = 60*fs
-    xsens_inds = np.arange(len(xsens_df))
-    large_windows = vsw(xsens_inds, len(xsens_inds),
-                        sub_window_size=large_win_size,
-                        stride_size=large_win_size)
-    keep_inds = []
-    for i, win in enumerate(large_windows):
-        if win[-1] != 0:
-            keep_inds.append(i)
-
-    large_windows = large_windows[keep_inds]
+    activity_df = get_activity_log(subject)
+    event_df = get_respiration_log(subject)
+
+    cal_df = get_cal_data(event_df, xsens_df)
+
+    # optionally include standing-activity data in the test set (test_standing flag)
+    test_df_tmp = get_test_data(cal_df, activity_df, xsens_df, test_standing)
+    test_df = pd.concat([df for df in test_df_tmp['data']], axis=0)
     
-    train_inds, test_inds = train_test_split(large_windows,
-                                             train_size=train_size,
-                                             shuffle=True,
-                                             random_state=123)
-
-    train_df = pd.concat([xsens_df.iloc[win] for win in train_inds if
-                          win[-1]!=0])
-    test_df = pd.concat([xsens_df.iloc[win] for win in test_inds if win[-1]!=0])
-
-    assert np.isin(train_df.index.values, test_df.index.values).any()==False,\
-            "overlapping test and train data"
-
-    print("train")
-    print(train_df.shape)
-    print("test")
-    print(test_df.shape)
-
-    if do_minirocket:
-        cols = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']
-        x_train_df, y_train_df = get_df_windows(train_df,
-                                                imu_df_win_task,
-                                                window_size=window_size,
-                                                window_shift=window_shift,
-                                                fs=fs,
-                                               )
-        x_test_df, y_test_df = get_df_windows(test_df, 
-                                              imu_df_win_task,
-                                              window_size=window_size,
-                                              window_shift=window_shift,
-                                              fs=fs,
-                                             )
-
-        x_train = make_windows_from_id(x_train_df, cols)
-        x_test  = make_windows_from_id(x_test_df, cols)
-        y_train = y_train_df[lbl_str].values.reshape(-1, 1)
-        y_test  = y_test_df[lbl_str].values.reshape(-1, 1)
-
-        print("minirocket transforming...")
-        x_train = np.swapaxes(x_train, 1, 2)
-        x_test = np.swapaxes(x_test, 1, 2)
-        minirocket = MiniRocketMultivariate()
-        x_train    = minirocket.fit_transform(x_train)
-        x_test     = minirocket.transform(x_test)
-    elif use_tsfresh:
-        x_train = train_df.iloc[:, 3:].values
-        y_train = train_df[lbl_str].values.reshape(-1, 1)
-        x_test  = test_df.iloc[:, 3:].values
-        y_test  = test_df[lbl_str].values.reshape(-1, 1)
-    else:
-        cols = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']
-        x_train_df, y_train_df = get_df_windows(train_df,
-                                                imu_df_win_task,
-                                                window_size=window_size,
-                                                window_shift=window_shift,
-                                                fs=fs,
-                                               )
-        x_test_df, y_test_df = get_df_windows(test_df, 
-                                              imu_df_win_task,
-                                              window_size=window_size,
-                                              window_shift=window_shift,
-                                              fs=fs,
-                                             )
-
-        x_train = make_windows_from_id(x_train_df, cols)
-        x_test  = make_windows_from_id(x_test_df, cols)
-        y_train = y_train_df[lbl_str].values.reshape(-1, 1)
-        y_test  = y_test_df[lbl_str].values.reshape(-1, 1)
-
-    transforms, model = model_training(mdl_str, x_train, y_train,
-                                       marker, validation_data=None,
-                                       overwrite=overwrite,
-                                       is_regression=True,
-                                       project_directory=project_dir,
-                                       window_size=int(window_size*fs),
-                                       extra_train=200,
-                                      )
-
-    if transforms is not None:
-        x_test = transforms.transform(x_test)
-
-    preds = model.predict(x_test)
-
-    eval_handle = EvalHandler(y_test.flatten(), preds.flatten(), subject,
-                              pfh, mdl_str, overwrite=overwrite)
-    eval_handle.update_eval_history()
-    eval_handle.save_eval_history()
-    pp = PrettyPrinter()
-    pp.pprint(eval_handle.load_eval_history())
-
-    fig, ax = plt.subplots()
-    ax.plot(y_test)
-    ax.plot(preds)
-    ax.set_title(' '.join([mdl_str, subject]))
-    ax.legend([lbl_str, 'pred'])
-    fig_dir = join(project_dir, 'figures',)
-    if not exists(fig_dir): mkdir(fig_dir)
-    fig.savefig(join(fig_dir, mdl_str))
+    for combi in combinations(cal_df[cal_str].values, train_len):
+        combi_str = "-".join([str(x) for x in combi])
+        pfh.config[cal_str] = combi_str
+        marker = f'imu_rr_{subject}_id{pfh.fset_id}_combi{combi_str}'
+        print(marker)
+
+        train_df = pd.concat(
+            [cal_df[cal_df[cal_str] == cpm]['data'].iloc[0] for cpm in combi],
+            axis=0
+        )
+
+        assert np.isin(train_df.index.values, test_df.index.values).any()==False,\
+                "overlapping test and train data"
+
+        print("train")
+        print(train_df.shape)
+        print("test")
+        print(test_df.shape)
+
+        if do_minirocket:
+            x_train_df, y_train_df = get_df_windows(train_df,
+                                                    imu_df_win_task,
+                                                    window_size=window_size,
+                                                    window_shift=window_shift,
+                                                    fs=fs,
+                                                   )
+            x_test_df, y_test_df = get_df_windows(test_df, 
+                                                  imu_df_win_task,
+                                                  window_size=window_size,
+                                                  window_shift=window_shift,
+                                                  fs=fs,
+                                                 )
+
+            x_train = make_windows_from_id(x_train_df, imu_cols)
+            x_test  = make_windows_from_id(x_test_df, imu_cols)
+            y_train = y_train_df[lbl_str].values.reshape(-1, 1)
+            y_test  = y_test_df[lbl_str].values.reshape(-1, 1)
+
+            print("minirocket transforming...")
+            x_train = np.swapaxes(x_train, 1, 2)
+            x_test = np.swapaxes(x_test, 1, 2)
+            minirocket = MiniRocketMultivariate()
+            x_train    = minirocket.fit_transform(x_train)
+            x_test     = minirocket.transform(x_test)
+        elif use_tsfresh:
+            x_train = train_df.iloc[:, 3:].values
+            y_train = train_df[lbl_str].values.reshape(-1, 1)
+            x_test  = test_df.iloc[:, 3:].values
+            y_test  = test_df[lbl_str].values.reshape(-1, 1)
+        else:
+            x_train_df, y_train_df = get_df_windows(train_df,
+                                                    imu_df_win_task,
+                                                    window_size=window_size,
+                                                    window_shift=window_shift,
+                                                    fs=fs,
+                                                   )
+            x_test_df, y_test_df = get_df_windows(test_df, 
+                                                  imu_df_win_task,
+                                                  window_size=window_size,
+                                                  window_shift=window_shift,
+                                                  fs=fs,
+                                                 )
+
+            x_train = make_windows_from_id(x_train_df, imu_cols)
+            x_test  = make_windows_from_id(x_test_df, imu_cols)
+            y_train = y_train_df[lbl_str].values.reshape(-1, 1)
+            y_test  = y_test_df[lbl_str].values.reshape(-1, 1)
+
+        transforms, model = model_training(mdl_str, x_train, y_train,
+                                           marker, validation_data=None,
+                                           overwrite=overwrite,
+                                           is_regression=True,
+                                           project_directory=project_dir,
+                                           window_size=int(window_size*fs),
+                                           extra_train=200,
+                                          )
+
+        if transforms is not None:
+            x_test = transforms.transform(x_test)
+
+        preds = model.predict(x_test)
+
+        eval_handle = EvalHandler(y_test.flatten(), preds.flatten(), subject,
+                                  pfh, mdl_str, overwrite=overwrite)
+        eval_handle.update_eval_history()
+        eval_handle.save_eval_history()
+
+        pp = PrettyPrinter()
+        pp.pprint(eval_handle.load_eval_history())
+
+        fig, ax = plt.subplots()
+        fig_title = '_'.join([mdl_str, subject]+[combi_str])
+        ax.plot(y_test)
+        ax.plot(preds)
+        ax.set_title(fig_title)
+        ax.legend([lbl_str, 'pred'])
+        fig_dir = join(project_dir, 'figures')
+        if not exists(fig_dir): mkdir(fig_dir)
+        fig.savefig(join(fig_dir, fig_title+".png"))
+        plt.close()
 
 def arg_parser():
     parser = argparse.ArgumentParser()
@@ -717,7 +791,7 @@ def arg_parser():
                                  'elastic'],
                        )
     parser.add_argument("-s", '--subject', type=int,
-                        default=1,
+                        default=2,
                         choices=list(range(1,3))+[-1],
                        )
     parser.add_argument("-f", '--feature_method', type=str,
@@ -734,10 +808,10 @@ def arg_parser():
                         default=0.2,
                        )
     parser.add_argument('-l', '--lbl_str', type=str,
-                        default='br',
+                        default='pss',
                        )
     parser.add_argument('-tl', '--train_len', type=int,
-                        default=1,
+                        default=3,
                         help='minutes of data to use for calibration'
                        )
     args = parser.parse_args()
@@ -748,13 +822,6 @@ if __name__ == '__main__':
     # '[!M]*'
     np.random.seed(100)
     n_subject_max = 2
-    xsens_df = load_and_sync_xsens('Pilot02')
-
-    activity_df = get_activity_log('Pilot02')
-    event_df = get_respiration_log('Pilot02')
-
-    ipdb.set_trace()
-    
     args = arg_parser()
 
     mdl_str        = args.model
-- 
GitLab