# %% Load packages
import os, pickle
from pathlib import Path
# Set directories
cwd = Path.cwd()
baseDir = cwd.parent  # repository root (parent of the folder this runs from)
dataDir = baseDir / 'data'  # per-muscle experimental data and parameter files
# Load custom functions
from functions.mp_estimator import loaddata, estimate
#%% Muscle & folders
# For each muscle: load the QR/SR/ISOM experimental data, estimate the muscle
# parameters with the traditional (TM) and improved (IM) methods, and pickle
# the results next to the muscle's parameter file.
for mus in ['GMe1', 'GMe2', 'GMe3']:
    dataDirMus = os.path.join(dataDir, mus, 'dataExp', '')
    parFile = os.path.join(dataDir, mus, 'parameters', mus)
    #%% Settings and load data
    optsQR = {
        'dataDir': dataDirMus + 'QR',
        'iCols': (0, 1, 3),
        'idxQRmin': 'auto',
        'idxQRpre': 'auto',
        'idxQRpst': 'auto',
        'nQRsamp': 'auto',
        'dispFig': False,
    }
    optsSR = {
        'dataDir': dataDirMus + 'SR',
        'iCols': (0, 1, 3),
        'idxSRcon': 'auto',
        'nSRsamp': 'auto',
        'dispFig': False,
    }
    optsISOM = {
        'dataDir': dataDirMus + 'SR',
        'iCols': (0, 1, 3, 2),
        'durStimOffset': 0.1,
        'dispFig': False,
    }
    dataQR, idxQRmin, idxQRpre, idxQRpst = loaddata.qr(optsQR)
    dataSR, idxSRcon = loaddata.sr(optsSR)
    dataISOM, idxSEL = loaddata.isom(optsISOM)
    #%% Traditional method
    muspar = loaddata.get_muspar()
    estpar, dataQRout = estimate.fl(dataQR, muspar)
    estpar, dataSRout = estimate.fv(dataSR, muspar, estpar)
    estpar, dataACTout = estimate.act(dataISOM, muspar, estpar, do_print=True)
    # BUGFIX: was written as parFile+'_TM_SR.pkl', but the loaders elsewhere
    # in this file read f'{mus}_TM.pkl'; also use a context manager so the
    # file handle is closed even if pickling fails.
    with open(parFile + '_TM.pkl', 'wb') as fh:
        pickle.dump([{**muspar, **estpar}, dataQRout, dataSRout, dataACTout], fh)
    #%% Improved method
    muspar = loaddata.get_muspar()
    estpar, dataQRout, dataSRout = estimate.im(dataQR, dataSR, muspar, do_print=True)
    estpar, dataACTout = estimate.act(dataISOM, muspar, estpar, do_print=True)
    with open(parFile + '_IM.pkl', 'wb') as fh:
        pickle.dump([{**muspar, **estpar}, dataQRout, dataSRout, dataACTout], fh)
Evaluating model predictions - in situ data
The analysis on this page corresponds to the manuscript section of the same title (‘Evaluating model predictions - in situ data’).
There are three steps:
- Step 1: Estimate muscle property values.
- Step 2: Simulate protocols to predict SEE over time.
- Step 3: Compute variables for manuscript.
Step 1: Estimate muscle property values
Step 2: Simulate protocols to predict SEE over time
# %% Load packages & set directories
import pickle, os, glob
from functions.helpers import simulate_file
# Resolve directories relative to this script's own location
cwd = os.path.dirname(os.path.abspath(__file__))
baseDir = os.path.join(cwd, os.pardir)
dataDir = os.path.join(baseDir, 'data')
# %% Simulate to predict SEE force based on experimental data
# For every muscle and parameter set (TM/IM), simulate each experimental file
# and write the simulated time series next to the experimental data.
for mus in ['GMe1', 'GMe2', 'GMe3']:
    for vPar in ['TM', 'IM']:
        # Load parameters: first element of the pickled list is the parameter dict.
        # Context manager ensures the file handle is closed (was a bare open()).
        parFile = os.path.join(dataDir, mus, 'parameters', f'{mus}_{vPar}.pkl')
        with open(parFile, 'rb') as fh:
            muspar = pickle.load(fh)[0]
        for exp in ['QR', 'SR', 'ISOM', 'SSC']:
            dataDirExp = os.path.join(dataDir, mus, 'dataExp', exp)
            dataDirSim = os.path.join(dataDir, mus, 'simsExp', vPar, exp, '')
            # Clean old simulation files so stale results never linger
            for f in glob.glob(os.path.join(dataDirSim, '*')):
                os.remove(f)
            # Process each experimental file
            for filepath in glob.glob(os.path.join(dataDirExp, '*')):
                df_sim, df_exp = simulate_file(filepath, muspar)
                # NOTE(review): assumes a fixed 6-char filename suffix is
                # being stripped (e.g. 'XX.csv') — confirm against the data.
                fname = os.path.basename(filepath)[:-6]
                outName = f"{fname}{vPar}.csv"
                df_sim.to_csv(os.path.join(dataDirSim, outName), index=False)
print(f"Saved: {outName}")
Step 3: Compute variables for manuscript
# %% Load packages
import os, pickle, sys, glob
import numpy as np
import pandas as pd
from pathlib import Path
# Directories
cwd = Path.cwd()
baseDir = cwd.parent  # repository root
dataDir = baseDir / 'data'
funcDir = baseDir / 'analysis' / 'functions'
sys.path.append(str(funcDir))  # make the custom helper modules importable below
# Custom functions
import helpers, stats
# %% Extract parameters
# Collect the estimated parameter values of every muscle for both methods
# into two DataFrames (rows = parameters, columns = muscles).
muscles = ['GMe1', 'GMe2', 'GMe3']
param_keys = ['a', 'b', 'kpee', 'ksee', 'fmax', 'lce_opt', 'lpee0', 'lsee0', 'tact', 'tdeact']
# NOTE(review): orParms is declared but never filled in this chunk — confirm
# it is used further down in the file, otherwise it can be removed.
orParms, tmParms, imParms = [], [], []
for mus in muscles:
    # Context managers close the pickle files (were bare open() calls);
    # element [0] of each pickled list is the parameter dict.
    with open(os.path.join(dataDir, mus, 'parameters', mus + '_TM.pkl'), 'rb') as fh:
        tmPar = pickle.load(fh)[0]
    with open(os.path.join(dataDir, mus, 'parameters', mus + '_IM.pkl'), 'rb') as fh:
        imPar = pickle.load(fh)[0]
    tmParms.append([tmPar[k] for k in param_keys])
    imParms.append([imPar[k] for k in param_keys])
# Store in pandas dataframes
df_tm = pd.DataFrame(list(zip(*tmParms)), columns=['GM1', 'GM2', 'GM3'], index=param_keys)  # df with estimated parameter values using the traditional method
df_im = pd.DataFrame(list(zip(*imParms)), columns=['GM1', 'GM2', 'GM3'], index=param_keys)  # df with estimated parameter values using the improved method
# %% Compute percentage differences from real/actual and estimated parameter values
# Positive: Improved is that % higher than Traditional
# Negative: Improved is that % lower than Traditional
df_p = stats.pdiff(df_im, df_tm)
# %% Compute RMSDs
# For every experiment, parameter model and muscle: pair the experimental
# files with the simulated ones, compute the RMSD of the SEE force over the
# stimulation window (normalised by fmax, in %), and tabulate mean ± std.
par_models = ['TM', 'IM']
experiments = ['QR', 'SR', 'ISOM', 'SSC']
muscles = ['GMe1', 'GMe2', 'GMe3']
avg_mean_all = []  # per-(exp, model) mean RMSD; order alternates TM, IM within each experiment
columns = [f'{model}_{exp}' for model in par_models for exp in experiments]
rows = muscles + ['Avg ± Std']
df = pd.DataFrame(index=rows, columns=columns, dtype=str)
for exp in experiments:
    for model in par_models:
        all_rmsd = []  # per-file RMSDs pooled over all muscles for this (exp, model)
        for mus in muscles:
            # Load parameters; fmax normalises the RMSD below.
            # Context manager closes the pickle file (was a bare open()).
            parFile = os.path.join(dataDir, mus, 'parameters', f'{mus}_{model}.pkl')
            with open(parFile, 'rb') as fh:
                muspar = pickle.load(fh)[0]
            dataDirExp = os.path.join(dataDir, mus, 'dataExp', exp)
            rrunDirExp = os.path.join(dataDir, mus, 'simsExp', model, exp)
            dataFiles = sorted(glob.glob(os.path.join(dataDirExp, '*')))
            rrunFiles = sorted(glob.glob(os.path.join(rrunDirExp, '*')))
            rms_list = []
            for dataFile, rrunFile in zip(dataFiles, rrunFiles):
                dataFilename = os.path.basename(dataFile)[:-4]
                rrunFilename = os.path.basename(rrunFile)[:-4]
                # Guard: experimental and simulated files must pair up
                if dataFilename[:-3] != rrunFilename[:-3]:
                    raise ValueError(f"File mismatch: {dataFilename} vs {rrunFilename}")
                dataData = pd.read_csv(dataFile).T.to_numpy()[0:4]
                rrunData = pd.read_csv(rrunFile).T.to_numpy()
                time1, _, stim1, fsee1 = dataData
                time2, _, _, fsee2 = rrunData
                tStimOn, tStimOff = helpers.get_stim(time1, stim1)[1:]
                iStart = int(np.argmin(abs(time1 - tStimOn[0])))
                if exp == 'ISOM':
                    # include 0.1 s beyond the stimulation offset
                    iStop = int(np.argmin(abs(time1 - 0.1 - tStimOff[0])))
                elif exp in ['QR', 'SR']:
                    iStop = int(np.argmin(abs(time1 - tStimOff[0])))
                else:  # SSC: up to the last stimulation offset
                    iStop = int(np.argmin(abs(time1 - tStimOff[-1])))
                # RMSD as a percentage of the muscle's maximal isometric force
                rms = stats.rmse(fsee1[iStart:iStop], fsee2[iStart:iStop]) / muspar['fmax'] * 100
                rms_list.append(rms)
            M = np.mean(rms_list)
            S = np.std(rms_list)
            df.loc[mus, f'{model}_{exp}'] = f'{M:.1f} ± {S:.1f}'
            all_rmsd += rms_list  # pool per-file RMSDs for the across-muscle average
        # Average over muscles for this (exp, model)
        avg_M = np.mean(all_rmsd)
        avg_S = np.std(all_rmsd)
        df.loc['Avg ± Std', f'{model}_{exp}'] = f'{avg_M:.1f} ± {avg_S:.1f}'
        avg_mean_all.append(avg_M)
avg_mean_all = np.array(avg_mean_all)
#%% 3.3: Evaluating model predictions
# BUGFIX: the f-strings below nested the same single quote as their delimiter
# (f'{df_p.loc['ksee']...}'), which is a SyntaxError on Python < 3.12;
# double-quoted f-strings are valid on all supported versions.
# Avg. difference: from traditional to improved method: SEE stiffness
p_IS_ksee_avg = f"{df_p.loc['ksee'].mean():0.0f}"  # [%] avg. percentage difference
# Avg. difference: from traditional to improved method: tact
p_IS_tact_avg = f"{df_p.loc['tact'].mean():0.0f}"  # [%] avg. percentage difference
# Avg. difference: from traditional to improved method: tdeact
p_IS_tdeact_avg = f"{df_p.loc['tdeact'].mean():0.0f}"  # [%] avg. percentage difference
# Avg. difference: from traditional to improved method: lce_opt
p_IS_lceopt_avg = f"{df_p.loc['lce_opt'].mean():0.0f}"  # [%] avg. percentage difference
# Mean RMSD difference over all experiments; avg_mean_all alternates
# TM ([0::2]) and IM ([1::2]) entries per experiment.
p_IS_rmsd = f"{stats.pdiff(avg_mean_all[1::2], avg_mean_all[0::2]).mean():2.0f}"