import glob
from os import makedirs, mkdir
from os.path import join, exists
import pandas as pd
import numpy as np
import json
import ipdb
import re
import pickle
import sys
import time
from zipfile import ZipFile

import argparse
from datetime import datetime, timedelta, timezone
import pytz

import matplotlib.pyplot as plt
from functools import partial
from collections import Counter
from itertools import repeat, chain, combinations
from multiprocessing import Pool, cpu_count
import tensorflow as tf

from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures, LabelEncoder
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import accuracy_score

from tsfresh.feature_extraction import extract_features
from tsfresh.feature_extraction import settings as tsfresh_settings
from tsfresh.utilities.string_manipulation import get_config_from_string

from modules.datapipeline import get_file_list, load_and_snip, load_data, \
        load_split_data, load_harness_data
from modules.digitalsignalprocessing import vectorized_slide_win as vsw
from modules.digitalsignalprocessing import imu_signal_processing
from modules.digitalsignalprocessing import bvp_signal_processing
from modules.digitalsignalprocessing import hernandez_sp, reject_artefact
from modules.digitalsignalprocessing import do_pad_fft, \
        pressure_signal_processing, infer_frequency, movingaverage
from modules.utils import *

from modules.evaluations import Evaluation
from modules.datapipeline import get_windowed_data, DataSynchronizer,\
        parallelize_dataframe
from modules.datapipeline import ProjectFileHandler
from models.ardregression import ARDRegressionClass
from models.knn import KNNClass
from models.svm import SVMClass
from models.lda import LDAClass
from models.svr import SVRClass
from models.logisticregression import LogisticRegressionClass
from models.linearregression import LinearRegressionClass
from models.neuralnet import FNN_HyperModel, LSTM_HyperModel, TunerClass,\
        CNN1D_HyperModel
from models.ridgeclass import RidgeClass
from models.resnet import Regressor_RESNET, Classifier_RESNET
from models.xgboostclass import XGBoostClass

from pprint import PrettyPrinter

from sktime.transformations.panel.rocket import (
        MiniRocket,
        MiniRocketMultivariate,
        MiniRocketMultivariateVariable,
)

from config import WINDOW_SIZE, WINDOW_SHIFT, IMU_FS, DATA_DIR, BR_FS, \
        FS_RESAMPLE, PPG_FS

IMU_COLS = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']

def utc_to_local(utc_dt, tz=None):
    '''Attach UTC tzinfo to a naive datetime and convert it to tz
    (host-local time when tz is None).'''
    return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=tz)

def datetime_from_utc_to_local(utc_datetime):
    '''Shift a naive UTC datetime into local time by applying the host
    machine's current UTC offset.'''
    now_timestamp = time.time()
    offset = datetime.fromtimestamp(now_timestamp) \
            - datetime.utcfromtimestamp(now_timestamp)
    return utc_datetime + offset
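
# A minimal sketch of the two timezone helpers above; the date and the
# assumed UTC+10 host offset are illustrative only:
#
#   utc_to_local(datetime(2022, 1, 1, 2, 0))  # -> 2022-01-01 12:00+10:00
#   datetime_from_utc_to_local(datetime(2022, 1, 1, 2, 0))  # naive local dt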

# Load data
def load_bioharness_file(f:str, skiprows=0, skipfooter=0, **kwargs):
    method = partial(pd.read_csv, skipinitialspace=True,
                     skiprows=list(range(1, skiprows+1)),
                     skipfooter=skipfooter,
                     header=0,
                     **kwargs
                    )
    df = method(f)
    if 'Time' not in df.columns.values:
        # Rebuild the timestamp from the Day/Month/Year columns plus the
        # millisecond counter
        df['Time'] = pd.to_datetime(
            df.rename(columns={'Date':'Day'})[
                ['Day','Month','Year']]) \
                + pd.to_timedelta(df['ms'], unit='ms')
        if pd.isna(df['Time']).any():
            df['Time'] = df['Time'].interpolate()
        df['Time'] = pd.to_datetime(df['Time'], format="%d/%m/%Y %H:%M:%S.%f")
        df['Time'] = df['Time'].dt.strftime("%d/%m/%Y %H:%M:%S.%f")
    return df

def load_bioharness_files(f_list:list, skiprows=0, skipfooter=0, **kwargs):
    # Forward the reader options to the single-file loader and concatenate
    df_list = [load_bioharness_file(f, skiprows=skiprows,
                                    skipfooter=skipfooter, **kwargs)
               for f in f_list]
    df = pd.concat(df_list, ignore_index=True)
    return df

def bioharness_datetime_to_seconds(val):
    fmt = "%d/%m/%Y %H:%M:%S.%f"
    return datetime.strptime(val, fmt).timestamp()
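
# A hypothetical usage sketch for the Bioharness loaders; the file path is
# illustrative only:
#
#   bh_df = load_bioharness_file('data/bioharness/BR.csv')
#   bh_df['sec'] = bh_df['Time'].map(bioharness_datetime_to_seconds)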

def load_imu_file(imu_file:str):
    hdr_file = imu_file.replace('imudata.gz', 'recording.g3')

    df = pd.read_json(imu_file, lines=True, compression='gzip')
    hdr = pd.read_json(hdr_file, orient='index')
    hdr = hdr.to_dict().pop(0)

    if df.empty: return df, hdr

    data_df = pd.DataFrame(df['data'].tolist())
    df = pd.concat([df.drop('data', axis=1), data_df], axis=1)

    iso_tz = hdr['created']
    tzinfo = pytz.timezone(hdr['timezone'])
    # Drop the trailing 'Z' from the ISO string and convert the UTC start
    # time to the recording timezone
    start_time = datetime.fromisoformat(iso_tz[:-1])
    start_time = utc_to_local(start_time, tz=tzinfo)

    na_inds = df.loc[pd.isna(df['accelerometer']), :].index.values
    df.drop(index=na_inds, inplace=True)

    imu_times = df['timestamp'].values
    df['timestamp_interp'] = imu_times
    df['timestamp_interp'] = df['timestamp_interp'].interpolate()
    imu_times = df['timestamp_interp'].values
    imu_datetimes = [start_time + timedelta(seconds=val)
                     for val in imu_times]
    imu_s = np.array([dt.timestamp() for dt in imu_datetimes])
    df['sec'] = imu_s

    # Drop samples stamped more than three hours after the start of the
    # recording; these are treated as spurious timestamps
    time_check_thold = df['sec'].min() + 3*3600
    mask = df['sec'] > time_check_thold
    if np.any(mask):
        df = df[np.logical_not(mask)]

    return df, hdr

def load_imu_files(f_list:list):
    data, hdr = [], []
    for f in f_list:
        d, h = load_imu_file(f)
        data.append(d)
        hdr.append(h)
    data_df = pd.concat(data, axis=0)
    return data_df, hdr
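
# Hypothetical usage: collect the gzipped IMU dumps for a session and load
# them into a single frame (the glob pattern is illustrative):
#
#   imu_files = sorted(glob.glob('data/session01/*imudata.gz'))
#   imu_df, imu_hdrs = load_imu_files(imu_files)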

def load_e4_file(e4_file:str):
    '''Load BVP data from an Empatica E4 zip archive.

    In each csv file, the first row is the initial time of the session as
    unix time and the second row is the sample rate in Hz.
    '''
    zip_file = ZipFile(e4_file)
    dfs = {csv_file.filename: pd.read_csv(zip_file.open(csv_file.filename),
                                          header=None)
           for csv_file in zip_file.infolist()
           if csv_file.filename.endswith('.csv')}
    bvp = dfs["BVP.csv"]
    t0 = bvp.iloc[0].values[0]
    fs = bvp.iloc[1].values[0]
    nsamples = len(bvp) - 2

    t0_datetime = datetime.utcfromtimestamp(t0)
    t0_local = datetime_from_utc_to_local(t0_datetime)
    # Build a per-sample time axis; the first two rows are metadata, so pad
    # them with NaN
    times = [np.nan, np.nan] \
            + [t0_local.timestamp() + ind*(1/fs) for ind in range(nsamples)]
    bvp.rename(columns={0: "bvp"}, inplace=True)
    bvp['sec'] = np.array(times)

    head = bvp.iloc[[0, 1]]
    bvp.drop(inplace=True, index=[0, 1])

    # The two metadata rows hold the start time and the sample rate in the
    # bvp column
    hdr = {'start_time': head.iloc[0, 0],
           'fs': head.iloc[1, 0]}

    return bvp, hdr
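
# A hypothetical usage sketch; the archive path is illustrative:
#
#   bvp_df, bvp_hdr = load_e4_file('data/session01/e4.zip')
#   print(bvp_hdr['start_time'], bvp_hdr['fs'])  # fs is nominally 64 Hz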

def load_e4_files(f_list:list):