From c3723bdade6609165d2795069f9200e0a64d87ef Mon Sep 17 00:00:00 2001 From: Caro Nettekoven <30801389+carobellum@users.noreply.github.com> Date: Sat, 28 Sep 2024 14:50:06 -0400 Subject: [PATCH 01/26] Update spm.py --- nitools/spm.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index 549a7a7..af417a0 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -14,6 +14,7 @@ import numpy as np import nitools as nt from scipy.io import loadmat +import h5py class SpmGlm: @@ -35,7 +36,15 @@ def get_info_from_spm_mat(self): Args: spm_mat_path (str): _description_ """ - SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True)['SPM'] + try: + # SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True)['SPM'] + SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True) + spm_file_loaded_with_scipyio = True + except Exception as e: + print(f"Error loading SPM.mat file. The file was saved as mat-file version 7.3 (see https://www.mathworks.com/help/matlab/import_export/mat-file-versions.html). Try loading the mat-file with Matlab, and saving it as mat-file version 7.0 instead. Use this command: ") + print(f"cp {self.path}/SPM.mat {self.path}/SPM.mat.backup") + print(f"matlab -nodesktop -nosplash -r \"load('{self.path}/SPM.mat'); save('{self.path}/SPM.mat', '-struct', 'SPM', '-v7'); exit\"") + # Get basic information from SPM.mat self.nscans = SPM['nscan'] self.nruns = len(self.nscans) @@ -59,6 +68,9 @@ def get_info_from_spm_mat(self): self.eff_df = SPM['xX']['erdf'] # Effective degrees of freedom self.weight = SPM['xX']['W'] # Weight matrix for whitening self.pinvX = SPM['xX']['pKX'] # Pseudo-inverse of (filtered and weighted) design matrix + pass + + def relocate_file(self, fpath: str) -> str: """SPM file entries to current project directory and OS.
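(Aside on the patch above: a v7.3 SPM.mat is an HDF5 container, so it can also be read directly with h5py — the import this patch adds — instead of being re-saved from Matlab. A minimal sketch, assuming only simple numeric fields are needed and the usual SPM field layout; not the loader this library commits to:

import h5py
import numpy as np

with h5py.File(f"{self.path}/SPM.mat", "r") as f:
    spm = f["SPM"]                       # the saved SPM struct appears as an HDF5 group
    nscan = np.ravel(spm["nscan"][()])   # number of scans per run
    W = spm["xX"]["W"][()].T             # note: MATLAB v7.3 stores arrays transposed

Caveat: cell arrays, strings, and sparse matrices come back as HDF5 object references and need extra decoding, which is presumably why the patch falls back to re-saving as v7.)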
@@ -146,6 +158,6 @@ def spm_filter(self,data): fdata = data.copy() for i in range(self.nruns): - Y = fdata[scan_bounds[i]:scan_bounds[i+1],:]; + Y = fdata[scan_bounds[i]:scan_bounds[i+1],:] Y = Y - self.filter_matrices[i] @ (self.filter_matrices[i].T @ Y) return fdata From 197882b08f53e068b83141a53dded03da3b08496 Mon Sep 17 00:00:00 2001 From: Caro Nettekoven <30801389+carobellum@users.noreply.github.com> Date: Sun, 29 Sep 2024 10:06:04 -0400 Subject: [PATCH 02/26] Update spm.py --- nitools/spm.py | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index af417a0..fb98412 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -68,7 +68,7 @@ def get_info_from_spm_mat(self): self.eff_df = SPM['xX']['erdf'] # Effective degrees of freedom self.weight = SPM['xX']['W'] # Weight matrix for whitening self.pinvX = SPM['xX']['pKX'] # Pseudo-inverse of (filtered and weighted) design matrix - pass + @@ -135,7 +135,7 @@ def get_residuals(self,mask): # Filter and temporal pre-whiten the data fdata= self.spm_filter(self.weight @ data) # spm_filter - # Estimate the beta coefficients abd residuals + # Estimate the beta coefficients and residuals beta = self.pinvX @ fdata residuals = fdata - self.design_matrix @ beta @@ -159,5 +159,39 @@ def spm_filter(self,data): fdata = data.copy() for i in range(self.nruns): Y = fdata[scan_bounds[i]:scan_bounds[i+1],:] - Y = Y - self.filter_matrices[i] @ (self.filter_matrices[i].T @ Y) + if self.filter_matrices[i].size > 0: + # Only apply if filter matrices are not empty + Y = Y - self.filter_matrices[i] @ (self.filter_matrices[i].T @ Y) return fdata + + def rerun_glm(self,data): + """ + Re-estimate the GLM on new data + + Args: + data (ndarray): 2d array of time series data (TxP) + Returns: + beta (ndarray): 2d array of beta coefficients (PxQ) + info (dict): with lists reg_name and run_number (Q long) + data_filt (ndarray): 2d array of filtered time series data (TxP) + data_hat (ndarray): 2d array of predicted time series data (TxP) + data_adj (ndarray): 2d array of adjusted time series data (TxP) + residuals (ndarray): 2d array of residuals (TxP) + """ + + # Filter and temporal pre-whiten the data + data_filt= self.spm_filter(self.weight @ data) # spm_filter + + # Estimate the beta coefficients and residuals + beta = self.pinvX @ data_filt + residuals = data_filt - self.design_matrix @ beta + # Get estimated (predicted) timeseries + data_hat = self.design_matrix[:, self.reg_of_interest] @ beta[self.reg_of_interest, :] + # Get adjusted timeseries + data_adj = data_hat + residuals + + # Return the regressors of interest (apart from the constant) + indx = self.reg_of_interest-1 + info = {'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]} + + return beta[indx,:], info, data_filt, data_hat, data_adj, residuals \ No newline at end of file From 3e477aaeec8bd3daac5e435632a9dc7e95bafd27 Mon Sep 17 00:00:00 2001 From: Caro Nettekoven <30801389+carobellum@users.noreply.github.com> Date: Sun, 29 Sep 2024 11:00:53 -0400 Subject: [PATCH 03/26] Update spm.py --- nitools/spm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index fb98412..8abd246 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -185,8 +185,9 @@ def rerun_glm(self,data): # Estimate the beta coefficients and residuals beta = self.pinvX @ data_filt residuals = data_filt - self.design_matrix @ beta - # Get estimated (predicted) timeseries - data_hat =
self.design_matrix[:, self.reg_of_interest[:-1]] @ beta[self.reg_of_interest[:-1], :] + # Get estimated (predicted) timeseries (regressors of interest without the constant) + data_hat = self.design_matrix[:, self.reg_of_interest[:-1]] @ beta[self.reg_of_interest[:-1], :] + # Get adjusted timeseries data_adj = data_hat + residuals From b575f75df0c18afd9ab7579b73d2efccf33f7c05 Mon Sep 17 00:00:00 2001 From: Caro Nettekoven <30801389+carobellum@users.noreply.github.com> Date: Sun, 29 Sep 2024 12:27:29 -0400 Subject: [PATCH 04/26] Update spm.py --- nitools/spm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index 8abd246..221777c 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -14,7 +14,7 @@ import numpy as np import nitools as nt from scipy.io import loadmat -import h5py +import pandas as pd class SpmGlm: @@ -193,6 +193,6 @@ def rerun_glm(self,data): # Return the regressors of interest (apart from the constant) indx = self.reg_of_interest-1 - info = {'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]} + info = pd.DataFrame({'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]}) return beta[indx,:], info, data_filt, data_hat, data_adj, residuals \ No newline at end of file From 469666f087b965f9b4b149b669fa6008d789ed48 Mon Sep 17 00:00:00 2001 From: Joern Diedrichsen Date: Tue, 1 Oct 2024 12:08:11 -0400 Subject: [PATCH 05/26] Comments improvement --- nitools/spm.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index 221777c..b2e1b26 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -166,7 +166,7 @@ def spm_filter(self,data): def rerun_glm(self,data): """ - Re-estimate the GLM on new data + Re-estimate the GLM (without hyperparameter estimation) on new data Args: data (ndarray): 2d array of time series data (TxP) Returns: beta (ndarray): 2d array of beta coefficients (PxQ) info (dict): with lists reg_name and run_number (Q long) data_filt (ndarray): 2d array of filtered time series data (TxP) - data_hat (ndarray): 2d array of predicted time series data (TxP) + data_hat (ndarray): 2d array of predicted time series data (TxP) - + This is predicted only using regressors of interest (without the constant or other nuisance regressors) data_adj (ndarray): 2d array of adjusted time series data (TxP) + This is filtered timeseries with constants and other nuisance regressors subtracted out residuals (ndarray): 2d array of residuals (TxP) """ From 6d1e2239912e96847fd2552efca447ffddc39ab5 Mon Sep 17 00:00:00 2001 From: Joern Diedrichsen Date: Sun, 1 Dec 2024 13:08:42 -0500 Subject: [PATCH 06/26] Removed registration of colormap Not compatible with newer versions - and AFAIK not necessary --- nitools/gifti.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nitools/gifti.py b/nitools/gifti.py index 165ec6e..b377751 100644 --- a/nitools/gifti.py +++ b/nitools/gifti.py @@ -296,8 +296,9 @@ def get_gifti_colortable(gifti,ignore_zero=False): labels = labels[1:] cmap = mpl.colors.LinearSegmentedColormap.from_list('mylist', rgba, N=len(rgba)) - mpl.cm.unregister_cmap("mycolormap") - mpl.cm.register_cmap("mycolormap", cmap) + # Removed - incompatible with newer versions of matplotlib + # mpl.cm.unregister_cmap("mycolormap") + # mpl.cm.register_cmap("mycolormap", cmap) return rgba, cmap From f4e23295f716c93bc28c5ab63ee503cd9fcf0655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Diedrichsen?= Date: Sun, 1
Dec 2024 17:04:50 -0500 Subject: [PATCH 07/26] resampling --- nitools/volume.py | 57 ++++++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/nitools/volume.py b/nitools/volume.py index 12a25d0..1d02faa 100644 --- a/nitools/volume.py +++ b/nitools/volume.py @@ -164,6 +164,7 @@ def euclidean_dist_sq(coordA,coordB): def sample_image(img,xm,ym,zm,interpolation): """ Return values after resample image + ignores NaN values Args: img (Nifti image): @@ -218,15 +219,13 @@ def sample_image(img,xm,ym,zm,interpolation): c011 = D[ir, jr+1, kr+1] c001 = D[ir, jr, kr+1] - c00 = c000*(1-id)+c100*id - c01 = c001*(1-id)+c101*id - c10 = c010*(1-id)+c110*id - c11 = c011*(1-id)+c111*id - - c0 = c00*(1-jd)+c10*jd - c1 = c01*(1-jd)+c11*jd - - value = c0*(1-kd)+c1*kd + c00 = nan_weighted_mean(c100,c000,id) + c01 = nan_weighted_mean(c101,c001,id) + c10 = nan_weighted_mean(c110,c010,id) + c11 = nan_weighted_mean(c111,c011,id) + c0 = nan_weighted_mean(c10,c00,jd) + c1 = nan_weighted_mean(c11,c01,jd) + value = nan_weighted_mean(c1,c0,kd) elif interpolation == 0: ir = np.rint(im).astype('int') jr = np.rint(jm).astype('int') @@ -245,6 +244,18 @@ value[invalid]=0 return value +def nan_weighted_mean(d1,d2,weight): + j = np.isnan(d1) + k = np.isnan(d2) + if j.sum()==0 and k.sum()==0: + return d1*weight + d2*(1-weight) + else: + dd1 = d1.copy() + dd2 = d2.copy() + dd1[j]=dd2[j] + dd2[k]=dd1[k] + return (dd1*weight + dd2*(1-weight)) + def check_voxel_range(img,i,j,k): """ Checks if i,j,k voxels coordinates are within an image @@ -294,17 +305,17 @@ def change_nifti_numformat(infile, """Changes the number format of a nifti file and saves it. Args: - infile (str): file name of input file - outfile (str): file name output file + infile (str): file name of input file + outfile (str): file name of output file new_numformat (str): New number format. Defaults to 'uint16'. - type_cast_data (bool): If true, typecasts the data as well. + typecast_data (bool): If true, typecasts the data as well. Note: - typecast_data changes the data format and can lead to and wrap-around of values, as the data is typecasted as well. - Do only when correcting a previous mistake in processing. + typecast_data changes the data format and can lead to a wrap-around of values, as the data is typecasted as well. + Do only when correcting a previous mistake in processing. """ A = nb.load(infile) X = A.get_fdata() - if typecast_data: + if typecast_data: X = X.astype(new_numformat) head = A.header.copy() head.set_data_dtype(new_numformat) @@ -314,20 +325,20 @@ def sample_images(imgs,coords,use_dataobj=True): """ Samples a list of images at a set of voxels (coordinates nearest neighbor) - 3d images are being returned as a single value per voxel. 4d images are being returned as a vector per voxel. - The function respects ths SPM-convention of specifying time slices 'img.nii,1' - Note that either all or none of the image names need to follow the 'img.nii,1' convention. + 3d images are being returned as a single value per voxel. 4d images are being returned as a vector per voxel. + The function respects the SPM-convention of specifying time slices 'img.nii,1' + Note that either all or none of the image names need to follow the 'img.nii,1' convention.
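(Behaviour of the NaN handling added in this patch, as a small sketch with invented values — where one corner value is NaN, nan_weighted_mean falls back to the valid neighbour:

import numpy as np
d1 = np.array([1.0, np.nan])
d2 = np.array([3.0, 5.0])
# first element: ordinary weighted mean 0.25*1 + 0.75*3 = 2.5
# second element: NaN in d1 is patched from d2, so the result is 5.0
nan_weighted_mean(d1, d2, 0.25)   # -> array([2.5, 5.0])

This matches the trilinear weights used in sample_image above, where weight multiplies the first argument and (1-weight) the second.)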
Args: imgs (list): - List of Nifti file names (SPM convention for time slices is respected) + List of Nifti file names (SPM convention for time slices is respected) coords (np-array): - 3xN matrix of coordinates of to be sampled voxels + 3xN matrix of coordinates of to be sampled voxels Returns: values (np-array): Array contains all values in the image """ n = len(imgs) - if ',' in imgs[0]: # SPM-convention of indicating time slice with ',x' notation + if ',' in imgs[0]: # SPM-convention of indicating time slice with ',x' notation fnames=[f.split(',')[0] for f in imgs] slice=np.array([f.split(',')[1] for f in imgs],dtype=int)-1 unique_fnames,img_indx = np.unique(fnames,return_inverse=True) @@ -344,7 +355,7 @@ def sample_images(imgs,coords,use_dataobj=True): img_indx.append(np.array([i],dtype=int)) slice.append(np.array([0],dtype=int)) slice = np.concatenate(slice) - img_indx = np.concatenate(img_indx) + img_indx = np.concatenate(img_indx) N = len(slice) # Load the header from all the input files voxels = coords_to_voxelidxs(coords,input_vols[0]) @@ -355,7 +366,7 @@ def sample_images(imgs,coords,use_dataobj=True): data_block = v.dataobj[min(voxels[0]):max(voxels[0])+1,min(voxels[1]):max(voxels[1])+1,min(voxels[2]):max(voxels[2])+1] D = data_block[voxels[0]-min(voxels[0]),voxels[1]-min(voxels[1]),voxels[2]-min(voxels[2])] if v.ndim==3: - data[dindx,:]=D + data[dindx,:]=D else: data[dindx,:]=D[:,slice[dindx]].T else: From 91847de2dff972f7407849c01bbf894937ca5600 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Diedrichsen?= Date: Sun, 1 Dec 2024 20:30:42 -0500 Subject: [PATCH 08/26] volumne_from_cifti --- nitools/cifti.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/nitools/cifti.py b/nitools/cifti.py index 7d82c86..fafed93 100644 --- a/nitools/cifti.py +++ b/nitools/cifti.py @@ -26,7 +26,7 @@ def make_label_cifti(data, bm_axis, label_RGBA (list): List of rgba vectors for labels Returns: - cifti (GiftiImage): Label gifti image + cifti (GiftiImage): Label gifti image """ if data.ndim == 1: # reshape to (1, num_vertices) @@ -213,7 +213,7 @@ def split_cifti_to_giftis(cifti_img, type = "label", column_names = None): )) return gii -def volume_from_cifti(cifti, struct_names=[]): +def volume_from_cifti(cifti, struct_names=None): """ Gets the 4D nifti object containing the data for all subcortical (volume-based) structures @@ -222,7 +222,7 @@ def volume_from_cifti(cifti, struct_names=[]): cifti object containing the data struct_names (list or None): List of structure names that are included - defaults to None + defaults to None (all) Returns: nii_vol(niftiImage): nifti object containing the data @@ -232,7 +232,10 @@ def volume_from_cifti(cifti, struct_names=[]): # get the data array with all the time points, all the structures d_array = cifti.get_fdata(dtype=np.float32) - struct_names = [nb.cifti2.BrainModelAxis.to_cifti_brain_structure_name(n) for n in struct_names] + if struct_names is None: + struct_names = [a for a,_,_ in bmf.iter_structures()] + else: + struct_names = [nb.cifti2.BrainModelAxis.to_cifti_brain_structure_name(n) for n in struct_names] # initialize a matrix representing 4D data (x, y, z, time_point) vol = np.zeros(bmf.volume_shape + (d_array.shape[0],)) From 3f4e072d24c731ea87cfb084ac45a5234f70e675 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rn=20Diedrichsen?= Date: Sat, 1 Feb 2025 15:03:56 -0500 Subject: [PATCH 09/26] volumes_from_cifti --- nitools/cifti.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nitools/cifti.py 
b/nitools/cifti.py index fafed93..aefcb18 100644 --- a/nitools/cifti.py +++ b/nitools/cifti.py @@ -241,14 +241,13 @@ def volume_from_cifti(cifti, struct_names=None): vol = np.zeros(bmf.volume_shape + (d_array.shape[0],)) for idx, (nam,slc,bm) in enumerate(bmf.iter_structures()): - if (any(s in nam for s in struct_names)): + if (nam in struct_names): ijk = bm.voxel bm_data = d_array[:, slc] i = (ijk[:,0] > -1) # fill in data vol[ijk[i, 0], ijk[i, 1], ijk[i, 2], :]=bm_data[:,i].T - # save as nii nii_vol_4d = nb.Nifti1Image(vol,bmf.affine) return nii_vol_4d From f61e3997e5ae644647ecddcb8f816f160098da86 Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Wed, 5 Mar 2025 17:59:32 -0500 Subject: [PATCH 10/26] Update spm.py --- nitools/spm.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/nitools/spm.py b/nitools/spm.py index b2e1b26..6731a39 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -71,7 +71,6 @@ def get_info_from_spm_mat(self): - def relocate_file(self, fpath: str) -> str: """SPM file entries to current project directory and OS. @@ -144,6 +143,29 @@ def get_residuals(self,mask): info = {'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]} return residuals, beta[indx,:], info + def region_getts(self, mask, stats='mean'): + + # Sample the relevant time series data + coords = nt.get_mask_coords(mask) + data = nt.sample_images(self.rawdata_files, coords, use_dataobj=True) + + if stats == 'mean': + y_raw = data.mean(axis=0) + + # Filter and temporal pre-whiten the data + fdata = self.spm_filter(self.weight @ data) # spm_filter + + # Estimate the beta coefficients and residuals + beta = self.pinvX @ fdata + yhat = self.design_matrix @ beta + residuals = fdata - self.design_matrix @ beta + + yadj = yhat + residuals + + return + + + def spm_filter(self,data): """ Does high pass-filtering and temporal weighting of the data (identical to spm_filter) From 22d3bbf6e249f2cf5472c83082a75165291f1898 Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Wed, 5 Mar 2025 18:31:42 -0500 Subject: [PATCH 11/26] Update spm.py --- nitools/spm.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index 6731a39..cd298c9 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -38,7 +38,7 @@ def get_info_from_spm_mat(self): """ try: # SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True)['SPM'] - SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True) + SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True)['SPM'] spm_file_loaded_with_scipyio = True except Exception as e: print(f"Error loading SPM.mat file. The file was saved as mat-file version 7.3 (see https://www.mathworks.com/help/matlab/import_export/mat-file-versions.html). Try loading the mat-file with Matlab, and saving it as mat-file version 7.0 instead.
Use this command: ") @@ -87,8 +87,10 @@ def relocate_file(self, fpath: str) -> str: """ norm_fpath = fpath.replace('\\', '/') base_path = dirname(self.path).replace('\\', '/') - c = norm_fpath.find('func') - return base_path + '/' + norm_fpath[c:] + c = norm_fpath.find('imaging_data') + g = base_path.find('glm') + return base_path[:g] + norm_fpath[c:] + # return norm_fpath def get_betas(self,mask): """ @@ -150,19 +152,21 @@ def region_getts(self, mask, stats='mean'): data = nt.sample_images(self.rawdata_files, coords, use_dataobj=True) if stats == 'mean': - y_raw = data.mean(axis=0) + y_raw = data.mean(axis=1) # Filter and temporal pre-whiten the data - fdata = self.spm_filter(self.weight @ data) # spm_filter + fdata = self.spm_filter(self.weight @ y_raw) # spm_filter # Estimate the beta coefficients and residuals beta = self.pinvX @ fdata - yhat = self.design_matrix @ beta + y_hat = self.design_matrix @ beta residuals = fdata - self.design_matrix @ beta - yadj = yhat + residuals + y_adj = y_hat + residuals - return + indx = self.reg_of_interest - 1 + + return y_raw, y_adj, y_hat, residuals, beta[indx, :], fdata From 52103758942d24187b348b8f0edbd9970f159b57 Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Thu, 6 Mar 2025 16:50:09 -0500 Subject: [PATCH 12/26] Bug fixed in relocate_file Was not pointing at the right directory. --- nitools/spm.py | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index cd298c9..be0cbbc 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -145,30 +145,6 @@ def get_residuals(self,mask): info = {'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]} return residuals, beta[indx,:], info - def region_getts(self, mask, stats='mean'): - - # Sample the relevant time series data - coords = nt.get_mask_coords(mask) - data = nt.sample_images(self.rawdata_files, coords, use_dataobj=True) - - if stats == 'mean': - y_raw = data.mean(axis=1) - - # Filter and temporal pre-whiten the data - fdata = self.spm_filter(self.weight @ y_raw) # spm_filter - - # Estimate the beta coefficients and residuals - beta = self.pinvX @ fdata - y_hat = self.design_matrix @ beta - residuals = fdata - self.design_matrix @ beta - - y_adj = y_hat + residuals - - indx = self.reg_of_interest - 1 - - return y_raw, y_adj, y_hat, residuals, beta[indx, :], fdata - - def spm_filter(self,data): """ From 3c98de10a2d463d85b72dffb6b5ce942d0302355 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Fri, 7 Mar 2025 11:50:10 -0500 Subject: [PATCH 13/26] added get_ciftis --- nitools/spm.py | 80 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 1 deletion(-) diff --git a/nitools/spm.py b/nitools/spm.py index be0cbbc..33606fd 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -12,6 +12,8 @@ from __future__ import annotations from os.path import normpath, dirname import numpy as np +import nibabel as nb +from nibabel import cifti2 import nitools as nt from scipy.io import loadmat import pandas as pd @@ -199,4 +201,80 @@ def rerun_glm(self,data): indx = self.reg_of_interest-1 info = pd.DataFrame({'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]}) - return beta[indx,:], info, data_filt, data_hat, data_adj, residuals \ No newline at end of file + return beta[indx,:], info, data_filt, data_hat, data_adj, residuals + + def get_ciftis(self, mask=(None, None), TR=1000, extra=None): + """ + Creates Cifti images to 
store the output of rerun_glm from an ROI defined in both hemispheres + + Args: + mask ((str, str) tuple): + Path to the L and R (in this order) .nii ROI mask. + E.g. ('path/to/left_S1.nii', 'path/to/right_S1.nii') + TR (int): + Temporal resolution in milliseconds + extra (dict): + Extra information to store in Cifti images. + E.g., name of ROI: extra['name'] = 'S1' + Returns: + img_beta (CiftiImage): beta coefficients (PxQ), save as *.dscalar.nii + img_raw (CiftiImage): raw time series data (TxP), save as *.dtseries.nii + img_filt (CiftiImage): filtered time series data (TxP), save as *.dtseries.nii + img_hat (CiftiImage): predicted time series data (TxP), save as *.dtseries.nii + This is predicted only using regressors of interest (without the constant or other nuisance regressors) + img_adj (CiftiImage): adjusted time series data (TxP), save as *.dtseries.nii + This is filtered timeseries with constants and other nuisance regressors subtracted out + img_res (CiftiImage): residuals (TxP), save as *.dtseries.nii + """ + + data = {name: [] for name in ["vox", "raw", "beta", "y_filt", "y_hat", "y_adj", "residuals"]} + struct = ['cortex_left', 'cortex_right'] + for m, s in zip(mask, struct): + if m is not None: + # load mask + mask_img = nb.load(m) + coords = nt.get_mask_coords(mask_img) + + # get raw data + raw_tmp = nt.sample_images(self.rawdata_files, coords) + data['raw'].append(raw_tmp) + + # rerun glm + beta_tmp, info, y_filt_tmp, y_hat_tmp, y_adj_tmp, residuals_tmp = self.rerun_glm(raw_tmp) + data['beta'].append(beta_tmp) + data['y_filt'].append(y_filt_tmp) + data['y_hat'].append(y_hat_tmp) + data['y_adj'].append(y_adj_tmp) + data['residuals'].append(residuals_tmp) + + data['vox'].append(cifti2.BrainModelAxis( + name=s, + voxel=nt.coords_to_voxelidxs(coords, mask_img).T, + affine=mask_img.affine, + volume_shape=mask_img.shape) + ) + + raw = np.hstack(data['raw']) + beta = np.hstack(data['beta']) + y_filt = np.hstack(data['y_filt']) + y_hat = np.hstack(data['y_hat']) + y_adj = np.hstack(data['y_adj']) + residuals = np.hstack(data['residuals']) + + vox = data['vox'][0] + data['vox'][1] + + series = cifti2.SeriesAxis(start=0, step=TR, size=raw.shape[0]) + + header = cifti2.Cifti2Header.from_axes((series, vox)) + img_raw = cifti2.Cifti2Image(dataobj=raw, header=header, extra=extra) + img_filt = cifti2.Cifti2Image(dataobj=y_filt, header=header, extra=extra) + img_hat = cifti2.Cifti2Image(dataobj=y_hat, header=header, extra=extra) + img_adj = cifti2.Cifti2Image(dataobj=y_adj, header=header, extra=extra) + img_res = cifti2.Cifti2Image(dataobj=residuals, header=header, extra=extra) + + scalar = cifti2.ScalarAxis(name=info.reg_name) + header = cifti2.Cifti2Header.from_axes((scalar, vox)) + img_beta = cifti2.Cifti2Image(dataobj=beta, header=header, extra=extra) + + return img_raw, img_beta, img_filt, img_hat, img_adj, img_res + \ No newline at end of file From e247f30a5ee726b1fa548ac222764020d631b4b3 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Fri, 7 Mar 2025 13:04:50 -0500 Subject: [PATCH 14/26] added cut --- nitools/spm.py | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/nitools/spm.py b/nitools/spm.py index 33606fd..c08638c 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -277,4 +277,45 @@ def get_ciftis(self, mask=(None, None), TR=1000, extra=None): img_beta = cifti2.Cifti2Image(dataobj=beta, header=header, extra=extra) return img_raw, img_beta, img_filt, img_hat, img_adj, img_res - \
No newline at end of file + + +def cut(X, pre, at, post, padding='last'): + """ + Cut segment from signal X. + + Parameters: + X : np.ndarray + Input data (rows: time frames, cols: features) + pre : int + Number of frames before `at` + at : int or None + Frame index at which to cut. If None or NaN, returns NaNs. + post : int + Number of frames after `at` + padding : str, optional + Padding strategy when time is not available ('nan', 'zero', 'last'). Default is 'last'. + + Returns: + np.ndarray + The extracted segment of the trajectory. + """ + if at is None or np.isnan(at): + return np.full((pre + post + 1, X.shape[1]), np.nan) + + rows, cols = X.shape + start, end = max(0, at - pre), min(at + post + 1, rows) + y0 = X[start:end] + + pad_before, pad_after = max(0, pre - (at - start)), max(0, post - (rows - end)) + + if padding == 'nan': + y = np.pad(y0, ((pad_before, pad_after), (0, 0)), mode='constant', constant_values=np.nan) + elif padding == 'zero': + y = np.pad(y0, ((pad_before, pad_after), (0, 0)), mode='constant', constant_values=0) + elif padding == 'last': + y = np.pad(y0, ((pad_before, pad_after), (0, 0)), mode='edge') + else: + raise ValueError("Unknown padding option. Use: 'nan', 'last', 'zero'") + + return y + From 1a7bc1f66c0b9bd1729fb049d861a53b3dcd41d8 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Fri, 7 Mar 2025 13:27:02 -0500 Subject: [PATCH 15/26] added avg_cut --- nitools/spm.py | 58 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 5 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index c08638c..a76f013 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -285,13 +285,13 @@ def cut(X, pre, at, post, padding='last'): Parameters: X : np.ndarray - Input data (rows: time frames, cols: features) + Input data (rows: voxels, cols: time samples) pre : int - Number of frames before `at` + N samples before `at` at : int or None - Frame index at which to cut. If None or NaN, returns NaNs. + Sample index at which to cut. If None or NaN, returns NaNs. post : int - Number of frames after `at` + N samples after `at` padding : str, optional Padding strategy when time is not available ('nan', 'zero', 'last'). Default is 'last'. @@ -304,7 +304,7 @@ def cut(X, pre, at, post, padding='last'): rows, cols = X.shape start, end = max(0, at - pre), min(at + post + 1, rows) - y0 = X[start:end] + y0 = X[:, start:end] pad_before, pad_after = max(0, pre - (at - start)), max(0, post - (rows - end)) @@ -319,3 +319,51 @@ def cut(X, pre, at, post, padding='last'): return y + +def avg_cut(X, pre, at, post, padding='last', stats='mean', axis=0, ResMS=None): + """ + Takes a vector of sample locations (at) and returns the signal (X) aligned and averaged around those locations + Args: + X (np.array): + Signal to cut. + pre (int): + N samples before cut + at (np.array or list): + Cut locations in samples + post (int): + N samples after cut + padding (str, optional): + Padding strategy when time is not available ('nan', 'zero', 'last'). Default is 'last'. + stats (str): + Sufficient statistic used for the area + mean: Mean of the voxels + whitemean: Mean of the voxel, spatially weighted by noise + axis (int): + Axis along which the sufficient statistic is applied. If X.shape = (n_voxels, n_samples) then axis=0 + ResMS (np.array): + Residual mean squares of shape (n_voxels,) only used if stats='prewhiten' for spatial prewhitening + + Returns: + + y (np.array): + Signal cut and averaged around locations at. 
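+
+ Example of the intended call pattern (an illustrative sketch; the event
+ indices and window sizes are invented):
+ >>> events = [60, 120, 300] # invented sample locations
+ >>> y = avg_cut(X, pre=5, at=events, post=15)
+ >>> # signal in a window of pre+post+1 = 21 samples, averaged over the events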
+ + """ + + if stats == 'mean': + X_avg = X.mean(axis=axis) + elif stats == 'whiten': + X_avg = (X / np.sqrt(ResMS)).mean(axis=axis) + else: + raise ValueError('Wrong argument for stats (permitted: mean, whiten)') + + y_tmp = [] + for a in at: + y_tmp.append(cut(X_avg, pre, a, post, padding)) + + y = np.vstack(y_tmp).mean(axis=0) + + return y + + + From cd82681801a889f6d7e5de174af062e1d04aee70 Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Fri, 7 Mar 2025 16:39:59 -0500 Subject: [PATCH 16/26] Update spm.py --- nitools/spm.py | 76 -------------------------------------------------- 1 file changed, 76 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index a76f013..c3f2dcd 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -203,82 +203,6 @@ def rerun_glm(self,data): return beta[indx,:], info, data_filt, data_hat, data_adj, residuals - def get_ciftis(self, mask=(None, None), TR=1000, extra=None): - """ - Creates Cifti images to store the output of rerun_glm from an ROI defined in both hemispheres - - Args: - mask ((str, str) tuple): - Path to the L and R (in this order) .nii ROI mask. - E.g. ('path/to/left_S1.nii', 'path/to/right_S1.nii') - TR (int): - Temporal resolution in milliseconds - extra (dict): - Extra information to store in Cifti images. - E.g., name of ROI: extra['name'] = 'S1' - Returns: - img_beta (CiftiImage): beta coefficients (PxQ), save as *.dscalar.nii - img_raw (CiftiImage): raw time series data (TxP), save as *.dtseries.nii - img_filt (CiftiImage): filtered time series data (TxP), save as *.dtseries.nii - img_hat (CiftiImage): predicted time series data (TxP), save as *.dtseries.nii - This is predicted only using regressors of interest (without the constant or other nuisance regressors) - img_adj (CiftiImage): adjusted time series data (TxP), save as *.dtseries.nii - This is filtered timeseries with constants and other nuisance regressors subtracted out - img_res (CiftiImage): residuals (TxP), save as *.dtseries.nii - """ - - data = {name: [] for name in ["vox", "raw", "beta", "y_filt", "y_hat", "y_adj", "residuals"]} - struct = ['cortex_left', 'cortex_right'] - for m, s in zip(mask, struct): - if m is not None: - # load mask - mask_img = nb.load(m) - coords = nt.get_mask_coords(mask_img) - - # get raw data - raw_tmp = nt.sample_images(self.rawdata_files, coords) - data['raw'].append(raw_tmp) - - # rerun glm - beta_tmp, info, y_filt_tmp, y_hat_tmp, y_adj_tmp, residuals_tmp = self.rerun_glm(raw_tmp) - data['beta'].append(beta_tmp) - data['y_filt'].append(y_filt_tmp) - data['y_hat'].append(y_hat_tmp) - data['y_adj'].append(y_adj_tmp) - data['residuals'].append(residuals_tmp) - - data['vox'].append(cifti2.BrainModelAxis( - name=s, - voxel=nt.coords_to_voxelidxs(coords, mask_img).T, - affine=mask_img.affine, - volume_shape=mask_img.shape) - ) - - raw = np.hstack(data['raw']) - beta = np.hstack(data['beta']) - y_filt = np.hstack(data['y_filt']) - y_hat = np.hstack(data['y_hat']) - y_adj = np.hstack(data['y_adj']) - residuals = np.hstack(data['residuals']) - - vox = data['vox'][0] + data['vox'][1] - - series = cifti2.SeriesAxis(start=0, step=TR, size=raw.shape[0]) - - header = cifti2.Cifti2Header.from_axes((series, vox)) - img_raw = cifti2.Cifti2Image(dataobj=raw, header=header, extra=extra) - img_filt = cifti2.Cifti2Image(dataobj=y_filt, header=header, extra=extra) - img_hat = cifti2.Cifti2Image(dataobj=y_hat, header=header, extra=extra) - img_adj = cifti2.Cifti2Image(dataobj=y_adj, header=header, extra=extra) - img_res =
cifti2.Cifti2Image(dataobj=residuals, header=header, extra=extra) - - scalar = cifti2.ScalarAxis(name=info.reg_name) - header = cifti2.Cifti2Header.from_axes((scalar, vox)) - img_beta = cifti2.Cifti2Image(dataobj=beta, header=header, extra=extra) - - return img_raw, img_beta, img_filt, img_hat, img_adj, img_res - - def cut(X, pre, at, post, padding='last'): """ Cut segment from signal X. From d2b35b1ddc28a33a73c5ce01bb691b40033a9c72 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Sat, 8 Mar 2025 06:10:22 -0500 Subject: [PATCH 17/26] minor --- nitools/spm.py | 59 ++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index c3f2dcd..7f8d0c3 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -43,10 +43,12 @@ def get_info_from_spm_mat(self): SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True)['SPM'] spm_file_loaded_with_scipyio = True except Exception as e: - print(f"Error loading SPM.mat file. The file was saved as mat-file version 7.3 (see https://www.mathworks.com/help/matlab/import_export/mat-file-versions.html). Try loading the mat-file with Matlab, and saving it as mat-file version 7.0 instead. Use this command: ") + print( + f"Error loading SPM.mat file. The file was saved as mat-file version 7.3 (see https://www.mathworks.com/help/matlab/import_export/mat-file-versions.html). Try loading the mat-file with Matlab, and saving it as mat-file version 7.0 instead. Use this command: ") print(f"cp {self.path}/SPM.mat {self.path}/SPM.mat.backup") - print(f"matlab -nodesktop -nosplash -r \"load('{self.path}/SPM.mat'); save('{self.path}/SPM.mat', '-struct', 'SPM', '-v7'); exit\"") - + print( + f"matlab -nodesktop -nosplash -r \"load('{self.path}/SPM.mat'); save('{self.path}/SPM.mat', '-struct', 'SPM', '-v7'); exit\"") + # Get basic information from SPM.mat self.nscans = SPM['nscan'] self.nruns = len(self.nscans) @@ -56,7 +58,7 @@ def get_info_from_spm_mat(self): self.run_number = [] # Extract run number and condition name from SPM names for reg_name in SPM['xX']['name']: - s=reg_name.split(' ') + s = reg_name.split(' ') self.run_number.append(int(s[0][3:-1])) self.beta_names.append(s[1]) self.run_number = np.array(self.run_number) @@ -66,12 +68,10 @@ def get_info_from_spm_mat(self): self.filter_matrices = [k['X0'] for k in SPM['xX']['K']] self.reg_of_interest = SPM['xX']['iC'] - self.design_matrix = SPM['xX']['xKXs']['X'] # Filtered and whitened design matrix - self.eff_df = SPM['xX']['erdf'] # Effective degrees of freedom - self.weight = SPM['xX']['W'] # Weight matrix for whitening - self.pinvX = SPM['xX']['pKX'] # Pseudo-inverse of (filtered and weighted) design matrix - - + self.design_matrix = SPM['xX']['xKXs']['X'] # Filtered and whitened design matrix + self.eff_df = SPM['xX']['erdf'] # Effective degrees of freedom + self.weight = SPM['xX']['W'] # Weight matrix for whitening + self.pinvX = SPM['xX']['pKX'] # Pseudo-inverse of (filtered and weighted) design matrix def relocate_file(self, fpath: str) -> str: """SPM file entries to current project directory and OS.
@@ -94,7 +94,7 @@ def relocate_file(self, fpath: str) -> str: return base_path[:g] + norm_fpath[c:] # return norm_fpath - def get_betas(self,mask): + def get_betas(self, mask): """ Samples the beta images of an estimated SPM GLM at the mask locations also returns the ResMS values, and the obseration descriptors (run and condition) name @@ -113,16 +113,16 @@ def get_betas(self, mask): coords = nt.get_mask_coords(mask) # Generate the list of relevant beta images: - indx = self.reg_of_interest-1 + indx = self.reg_of_interest - 1 beta_files = [f'{self.path}/{self.beta_files[i]}' for i in indx] # Get the data from beta and ResMS files rms_file = [f'{self.path}/ResMS.nii'] - data = nt.sample_images(beta_files + rms_file,coords,use_dataobj=False) + data = nt.sample_images(beta_files + rms_file, coords, use_dataobj=False) # Return the data and the observation descriptors info = {'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]} - return data[:-1,:], data[-1,:], info + return data[:-1, :], data[-1, :], info - def get_residuals(self,mask): + def get_residuals(self, mask): """ Collects 3d images of a range of GLM residuals (typical SPM GLM results) and corresponding metadata @@ -133,22 +133,21 @@ def get_residuals(self, mask): res_range (range): range of to be saved residual images per run """ # Sample the relevant time series data coords = nt.get_mask_coords(mask) - data = nt.sample_images(self.rawdata_files,coords,use_dataobj=True) + data = nt.sample_images(self.rawdata_files, coords, use_dataobj=True) # Filter and temporal pre-whiten the data - fdata= self.spm_filter(self.weight @ data) # spm_filter + fdata = self.spm_filter(self.weight @ data) # spm_filter # Estimate the beta coefficients and residuals beta = self.pinvX @ fdata residuals = fdata - self.design_matrix @ beta # Return the regressors of interest - indx = self.reg_of_interest-1 + indx = self.reg_of_interest - 1 info = {'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]} - return residuals, beta[indx,:], info + return residuals, beta[indx, :], info - def spm_filter(self,data): + def spm_filter(self, data): """ Does high pass-filtering and temporal weighting of the data (identical to spm_filter) @@ -158,17 +157,17 @@ def spm_filter(self, data): data (ndarray): 2d array of time series data (TxP) """ scan_bounds = self.nscans.cumsum() - scan_bounds = np.insert(scan_bounds,0,0) + scan_bounds = np.insert(scan_bounds, 0, 0) fdata = data.copy() for i in range(self.nruns): - Y = fdata[scan_bounds[i]:scan_bounds[i+1],:] + Y = fdata[scan_bounds[i]:scan_bounds[i + 1], :] if self.filter_matrices[i].size > 0: # Only apply if filter matrices are not empty Y = Y - self.filter_matrices[i] @ (self.filter_matrices[i].T @ Y) return fdata - def rerun_glm(self,data): + def rerun_glm(self, data): """ Re-estimate the GLM (without hyperparameter estimation) on new data Args: data (ndarray): 2d array of time series data (TxP) Returns: beta (ndarray): 2d array of beta coefficients (PxQ) info (dict): with lists reg_name and run_number (Q long) data_filt (ndarray): 2d array of filtered time series data (TxP) data_hat (ndarray): 2d array of predicted time series data (TxP) - This is predicted only using regressors of interest (without the constant or other nuisance regressors) data_adj (ndarray): 2d array of adjusted time series data (TxP) This is filtered timeseries with constants and other nuisance regressors subtracted out residuals (ndarray): 2d array of residuals (TxP) """ # Filter and temporal pre-whiten the data - data_filt= self.spm_filter(self.weight @ data) # spm_filter + data_filt = self.spm_filter(self.weight @ data) # spm_filter # Estimate the beta coefficients and residuals beta = self.pinvX @ data_filt residuals = data_filt - self.design_matrix @ beta # Get estimated (predicted) timeseries (regressors of interest without the constant) data_hat = self.design_matrix[:, self.reg_of_interest[:-1]] @ beta[self.reg_of_interest[:-1], :] # Get adjusted timeseries data_adj = data_hat + residuals # Return the regressors of interest (apart from the constant) - indx = self.reg_of_interest-1 + indx = self.reg_of_interest - 1 info = pd.DataFrame({'reg_name': self.beta_names[indx], 'run_number': self.run_number[indx]}) - return beta[indx,:], info, data_filt, data_hat, data_adj, residuals + return beta[indx, :], info, data_filt, data_hat,
data_adj, residuals + def cut(X, pre, at, post, padding='last'): """ @@ -288,6 +288,3 @@ def avg_cut(X, pre, at, post, padding='last', stats='mean', axis=0, ResMS=None): y = np.vstack(y_tmp).mean(axis=0) return y - - - From 1ea1ecc9cf63cd5e4057e16744d615c6c3676994 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Tue, 11 Mar 2025 09:36:23 -0400 Subject: [PATCH 18/26] mask as str, nifti img or ndarray in get_betas and get_residuals --- nitools/spm.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index 7f8d0c3..a3ffc6e 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -110,7 +110,16 @@ def get_betas(self, mask): obs_descriptors (dict): with lists reg_name and run_number (N long) """ - coords = nt.get_mask_coords(mask) + if isinstance(mask, str): + mask = nb.load(mask) + if isinstance(mask, nb.Nifti1Image): + coords = nt.get_mask_coords(mask) + if isinstance(mask, np.ndarray): + coords = mask + try: + assert coords.shape[0] == 3 + except AssertionError: + print(f"Error: Coords must have shape (3, N), but got {coords.shape} instead") # Generate the list of relevant beta images: indx = self.reg_of_interest - 1 @@ -132,7 +141,17 @@ def get_residuals(self, mask): res_range (range): range of to be saved residual images per run """ # Sample the relevant time series data - coords = nt.get_mask_coords(mask) + if isinstance(mask, str): + mask = nb.load(mask) + if isinstance(mask, nb.Nifti1Image): + coords = nt.get_mask_coords(mask) + if isinstance(mask, np.ndarray): + coords = mask + try: + assert coords.shape[0] == 3 + except AssertionError: + print(f"Error: Coords must have shape (3, N), but got {coords.shape} instead") + data = nt.sample_images(self.rawdata_files, coords, use_dataobj=True) # Filter and temporal pre-whiten the data From 236cc18b98d57232282d06ba399e8c67e2e28b55 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Tue, 11 Mar 2025 09:54:55 -0400 Subject: [PATCH 19/26] Update spm.py --- nitools/spm.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index a3ffc6e..0e7ee85 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -114,12 +114,10 @@ def get_betas(self, mask): mask = nb.load(mask) if isinstance(mask, nb.Nifti1Image): coords = nt.get_mask_coords(mask) - if isinstance(mask, np.ndarray): + if isinstance(mask, np.ndarray) and (mask.shape[0] == 3): coords = mask - try: - assert coords.shape[0] == 3 - except AssertionError: - print(f"Error: Coords must have shape (3, N), but got {coords.shape} instead") + else: + raise ValueError('Mask should be a 3xP array or coordinates, a nifti1image, or nifti file name') # Generate the list of relevant beta images: indx = self.reg_of_interest - 1 From b73f834450a0efd4d7ff0e801fae5085c4c21e92 Mon Sep 17 00:00:00 2001 From: Marco <97095997+mnlmrc@users.noreply.github.com> Date: Tue, 11 Mar 2025 09:56:00 -0400 Subject: [PATCH 20/26] Update spm.py --- nitools/spm.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index 0e7ee85..f24f607 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -114,7 +114,7 @@ def get_betas(self, mask): mask = nb.load(mask) if isinstance(mask, nb.Nifti1Image): coords = nt.get_mask_coords(mask) - if isinstance(mask, np.ndarray) and (mask.shape[0] == 3): + elif isinstance(mask, np.ndarray) and (mask.shape[0] == 3): coords = mask else: raise ValueError('Mask should 
be a 3xP array or coordinates, a nifti1image, or nifti file name') data = nt.sample_images(self.rawdata_files, coords, use_dataobj=True) From a8a36df5f4d973ccabfecd206bc6815f4f4a6f5e Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Thu, 13 Mar 2025 09:11:45 -0400 Subject: [PATCH 21/26] Corrected bug in cut --- nitools/spm.py | 25 +++++++++----------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index f24f607..44bab2d 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -224,7 +224,7 @@ def cut(X, pre, at, post, padding='last'): Parameters: X : np.ndarray - Input data (rows: voxels, cols: time samples) + Input data (rows: time samples, cols: voxels) pre : int N samples before `at` at : int or None Sample index at which to cut. If None or NaN, returns NaNs. post : int N samples after `at` padding : str, optional Padding strategy when time is not available ('nan', 'zero', 'last'). Default is 'last'. Returns: np.ndarray The extracted segment of the trajectory. """ if at is None or np.isnan(at): return np.full((pre + post + 1, X.shape[1]), np.nan) rows, cols = X.shape start, end = max(0, at - pre), min(at + post + 1, rows) - y0 = X[:, start:end] + y0 = X[start:end, :] - pad_before, pad_after = max(0, pre - (at - start)), max(0, post - (rows - end)) + pad_before, pad_after = max(0, pre - (at - start)), max(0, post - (rows - at - 1)) if padding == 'nan': - y = np.pad(y0, ((pad_before, pad_after), (0, 0)), mode='constant', constant_values=np.nan) + y = np.pad(y0, ((pad_before, pad_after),(0, 0), ), mode='constant', constant_values=np.nan) elif padding == 'zero': - y = np.pad(y0, ((pad_before, pad_after), (0, 0)), mode='constant', constant_values=0) + y = np.pad(y0, ((pad_before, pad_after), (0, 0), ), mode='constant', constant_values=0) elif padding == 'last': - y = np.pad(y0, ((pad_before, pad_after), (0, 0)), mode='edge') + y = np.pad(y0, ( (pad_before, pad_after), (0, 0), ), mode='edge') else: raise ValueError("Unknown padding option.
Use: 'nan', 'last', 'zero'") return y -def avg_cut(X, pre, at, post, padding='last', stats='mean', axis=0, ResMS=None): +def avg_cut(X, pre, at, post, padding='last'): """ Takes a vector of sample locations (at) and returns the signal (X) aligned and averaged around those locations Args: @@ -289,17 +289,10 @@ def avg_cut(X, pre, at, post, padding='last', stats='mean', axis=0, ResMS=None): """ - if stats == 'mean': - X_avg = X.mean(axis=axis) - elif stats == 'whiten': - X_avg = (X / np.sqrt(ResMS)).mean(axis=axis) - else: - raise ValueError('Wrong argument for stats (permitted: mean, whiten)') - y_tmp = [] for a in at: - y_tmp.append(cut(X_avg, pre, a, post, padding)) + y_tmp.append(cut(X, pre, a, post, padding)) - y = np.vstack(y_tmp).mean(axis=0) + y = np.array(y_tmp).mean(axis=0) return y From e6f5bee3e025adb4bbf8a4e45890f3a51d7de9fe Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:38:41 -0400 Subject: [PATCH 22/26] Update spm.py --- nitools/spm.py | 327 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 327 insertions(+) diff --git a/nitools/spm.py b/nitools/spm.py index 44bab2d..b6aee1d 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -17,6 +17,7 @@ import nitools as nt from scipy.io import loadmat import pandas as pd +from scipy.stats import gamma class SpmGlm: @@ -72,6 +73,10 @@ def get_info_from_spm_mat(self): self.eff_df = SPM['xX']['erdf'] # Effective degrees of freedom self.weight = SPM['xX']['W'] # Weight matrix for whitening self.pinvX = SPM['xX']['pKX'] # Pseudo-inverse of (filtered and weighted) design matrix + self.bf = SPM['xBF']['bf'] + self.U = SPM['xX']['U'] + self.Volterra = SPM['xBF']['Volterra'] + def relocate_file(self, fpath: str) -> str: """SPM file entries to current project directory and OS. @@ -217,6 +222,83 @@ def rerun_glm(self, data): return beta[indx, :], info, data_filt, data_hat, data_adj, residuals + def spmj_glm_convolve(self): + """ + Re-convolves the SPM structure with a new basis function. + + Parameters: + SPM : dict + SPM design structure. + + Returns: + SPM : dict + Modified SPM structure. 
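Background for the re-convolution step this method performs: each condition's onset train is convolved with the basis set at microtime resolution and then resampled at the acquisition times. A self-contained sketch of just that step, with an invented onset vector and a single stand-in basis function (not the SPM code itself):

import numpy as np
T, T0, nscan = 16, 8, 200                        # microtime bins per TR, sampling offset, scans
u = np.zeros(nscan * T)                          # upsampled onset train for one condition
u[[10 * T, 90 * T, 150 * T]] = 1.0               # three invented event onsets
bf = np.exp(-np.arange(32 * T) / (6.0 * T))      # stand-in basis function
x = np.convolve(u, bf)[:u.size]                  # convolved regressor at microtime
col = x[np.arange(nscan) * T + T0]               # resampled design-matrix column

The committed code additionally applies SPM's fixed 32-bin onset offset when resampling, as visible in the hunk below.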
+ """ + + Xx = np.array([]) + Xb = np.array([]) + iCs, iCc, iCb, iN = [], [], [], [] + + num_basis = self.bf.shape[1] + num_scan = self.nscans + + for s in range(num_scan): + # Number of scans for this session + k = SPM["nscan"][s] + + # Get stimulus functions U + U = SPM["Sess"][s]["U"] + num_cond = len(U) + + # Convolve stimulus functions with basis functions + X, Xn, Fc = spm_Volterra(U, self.bf, self.Volterra) + + # Resample regressors at acquisition times (32 bin offset) + X = X[np.arange(k) * SPM["xBF"]["T"] + SPM["xBF"]["T0"] + 32, :] + + # Orthogonalize within trial type + for i in range(len(Fc)): + X[:, Fc[i]["i"]] = spm_orth(X[:, Fc[i]["i"]]) + + # Get user-specified regressors + C = SPM["Sess"][s]["C"]["C"] + num_reg = C.shape[1] + X = np.hstack([X, spm_detrend(C)]) + + # Store session info + if Xx.size == 0: + Xx = X + Xb = np.ones((k, 1)) + else: + Xx = blkdiag([Xx, X]) + Xb = blkdiag([Xb, np.ones((k, 1))]) + + iCs.extend([s + 1] * (X.shape[1] + num_reg)) + iCc.extend(np.kron(np.arange(1, num_cond + 1), np.ones(num_basis, dtype=int)).tolist() + [0] * num_reg) + iCb.extend(np.kron(np.ones(num_cond, dtype=int), np.arange(1, num_basis + 1)).tolist() + [0] * num_reg) + iN.extend([0] * (num_cond * num_basis) + [1] * num_reg) + + # Finalize design matrix + SPM["xX"]["X"] = np.hstack([Xx, Xb]) + + if "W" in SPM["xX"]: + SPM["xX"]["xKXs"] = spm_sp("Set", spm_filter(SPM["xX"]["K"], SPM["xX"]["W"] @ SPM["xX"]["X"])) + else: + SPM["xX"]["xKXs"] = spm_sp("Set", spm_filter(SPM["xX"]["K"], SPM["xX"]["X"])) + + SPM["xX"]["xKXs"]["X"] = SPM["xX"]["xKXs"]["X"].astype(float) + SPM["xX"]["pKX"] = spm_sp("x-", SPM["xX"]["xKXs"]) + + # Indices for regressors + SPM["xX"]["iC"] = list(range(Xx.shape[1])) + SPM["xX"]["iB"] = list(range(Xb.shape[1])) + Xx.shape[1] + SPM["xX"]["iCs"] = iCs + list(range(1, num_scan + 1)) + SPM["xX"]["iCc"] = iCc + [0] * num_scan + SPM["xX"]["iCb"] = iCb + [0] * num_scan + SPM["xX"]["iN"] = iN + [2] * num_scan + + return SPM + def cut(X, pre, at, post, padding='last'): """ @@ -296,3 +378,248 @@ def avg_cut(X, pre, at, post, padding='last'): y = np.array(y_tmp).mean(axis=0) return y + + +def blkdiag(*matrices): + """ + Constructs a block diagonal matrix from multiple input matrices. + + Parameters: + *matrices : list of ndarray + Matrices to be placed on the block diagonal. + + Returns: + y : ndarray + Block diagonal concatenated matrix. + """ + if len(matrices) == 0: + return np.array([]) + + # Ensure all inputs are 2D arrays + matrices = [np.atleast_2d(m) for m in matrices] + + # Determine final shape + rows = sum(m.shape[0] for m in matrices) + cols = sum(m.shape[1] for m in matrices) + + # Preallocate result matrix with zeros + y = np.zeros((rows, cols), dtype=matrices[0].dtype) + + # Fill the block diagonal + r_offset, c_offset = 0, 0 + for m in matrices: + r, c = m.shape + y[r_offset:r_offset + r, c_offset:c_offset + c] = m + r_offset += r + c_offset += c + + return y + + +def spm_detrend(x, p=0): + """ + Polynomial detrending over columns. + + Parameters: + x : ndarray or list of ndarrays + Data matrix (or list of matrices). + p : int, optional + Order of polynomial (default: 0, i.e., mean subtraction). + + Returns: + y : ndarray or list of ndarrays + Detrended data matrix. 
+ """ + + # Handle case where x is a list (equivalent to cell arrays in MATLAB) + if isinstance(x, list): + return [spm_detrend(xi, p) for xi in x] + + # Check dimensions + m, n = x.shape + if m == 0 or n == 0: + return np.array([]) + + # Mean subtraction (order 0) + if p == 0: + return x - np.mean(x, axis=0, keepdims=True) + + # Polynomial adjustment + G = np.zeros((m, p + 1)) + for i in range(p + 1): + G[:, i] = (np.arange(1, m + 1) ** i) + + y = x - G @ np.linalg.pinv(G) @ x + return y + + +def spm_orth(X, OPT='pad'): + """ + Recursive Gram-Schmidt orthogonalisation of basis functions + + Parameters: + X : ndarray + Input matrix + OPT : str, optional + 'norm' for Euclidean normalization + 'pad' for zero padding of null space (default) + + Returns: + X : ndarray + Orthogonalized matrix + """ + # Turn off warnings (equivalent to MATLAB warning('off','all')) + np.seterr(all='ignore') + + n, m = X.shape + X = X[:, np.any(X, axis=0)] # Remove zero columns + rankX = np.linalg.matrix_rank(X) + + try: + x = X[:, [0]] + j = [0] + + for i in range(1, X.shape[1]): + D = X[:, [i]] + D = D - x @ np.linalg.pinv(x) @ D + + if np.linalg.norm(D, 1) > np.exp(-32): + x = np.hstack((x, D)) + j.append(i) + + if len(j) == rankX: + break + except: + x = np.zeros((n, 0)) + j = [] + + # Restore warnings + np.seterr(all='warn') + + # Normalization if requested + if OPT == 'pad': + X_out = np.zeros((n, m)) + X_out[:, j] = x + elif OPT == 'norm': + X_out = x / np.linalg.norm(x, axis=0, keepdims=True) + else: + X_out = x + + return X_out + + +def spm_Volterra(U, bf, V=1): + """ + Generalized convolution of inputs (U) with basis set (bf) + + Parameters: + U : list of dict + Input structure array containing 'u' (time series) and 'name' (labels) + bf : ndarray + Basis functions + V : int, optional + Order of Volterra expansion (default: 1) + + Returns: + X : ndarray + Design Matrix + Xname : list + Names of regressors (columns) in X + Fc : list of dict + Contains indices and names for each input + """ + X = [] + Xname = [] + Fc = [] + + # First-order terms + for i, u_dict in enumerate(U): + ind = [] + ip = [] + for k in range(u_dict['u'].shape[1]): + for p in range(bf.shape[1]): + x = u_dict['u'][:, k] + d = np.arange(len(x)) + x = np.convolve(x, bf[:, p], mode='full')[:len(d)] + X.append(x) + + Xname.append(f"{u_dict['name'][k]}*bf({p + 1})") + ind.append(len(X)) + ip.append(k + 1) + + Fc.append({'i': ind, 'name': u_dict['name'][0], 'p': ip}) + + X = np.column_stack(X) if X else np.empty((len(U[0]['u']), 0)) + + # Return if first order + if V == 1: + return X, Xname, Fc + + # Second-order terms + for i in range(len(U)): + for j in range(i, len(U)): + ind = [] + ip = [] + for p in range(bf.shape[1]): + for q in range(bf.shape[1]): + x = U[i]['u'][:, 0] + y = U[j]['u'][:, 0] + x = np.convolve(x, bf[:, p], mode='full')[:len(d)] + y = np.convolve(y, bf[:, q], mode='full')[:len(d)] + X = np.column_stack((X, x * y)) + + Xname.append(f"{U[i]['name'][0]}*bf({p + 1})x{U[j]['name'][0]}*bf({q + 1})") + ind.append(X.shape[1]) + ip.append(1) + + Fc.append({'i': ind, 'name': f"{U[i]['name'][0]}x{U[j]['name'][0]}", 'p': ip}) + + return X, Xname, Fc + + +def spm_hrf(RT, P=None, T=16): + """ + Haemodynamic response function (HRF) + + Parameters: + RT : float + Scan repeat time (TR) + P : list or ndarray, optional + Parameters of the response function (two Gamma functions), defaults to [6, 16, 1, 1, 6, 0, 32] + T : int, optional + Microtime resolution (default: 16) + + Returns: + hrf : ndarray + Hemodynamic response function + p : 
ndarray + Parameters of the response function + """ + # Default parameters if not provided + if P is None: + P = [6, 16, 1, 1, 6, 0, 32] + + # Ensure P has the correct length + p = np.array([6, 16, 1, 1, 6, 0, 32]) + p[:len(P)] = P + + # Microtime resolution + dt = RT / T + u = np.arange(0, np.ceil(p[6] / dt) + 1) - p[5] / dt + + # Modelled hemodynamic response function (mixture of two gamma distributions) + hrf = (gamma.pdf(u, p[0] / p[2], scale=dt / p[2]) - + gamma.pdf(u, p[1] / p[3], scale=dt / p[3]) / p[4]) + + # Downsample to TR resolution + hrf = hrf[np.floor(np.arange(0, p[6] / RT + 1) * T).astype(int)] + + # Normalize HRF + hrf = hrf / np.sum(hrf) + + return hrf, p + + +SPM = SpmGlm('/cifs/diedrichsen/data/SensoriMotorPrediction/smp2/glm12/subj103/') +SPM.get_info_from_spm_mat() + From a12aca46268bd7e900d129361ee2b560b6e9ced6 Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Tue, 18 Mar 2025 18:43:02 -0400 Subject: [PATCH 23/26] Update spm.py --- nitools/spm.py | 51 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index b6aee1d..bb0ec9e 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -13,8 +13,7 @@ from os.path import normpath, dirname import numpy as np import nibabel as nb -from nibabel import cifti2 -import nitools as nt +# import nitools as nt from scipy.io import loadmat import pandas as pd from scipy.stats import gamma @@ -73,9 +72,12 @@ def get_info_from_spm_mat(self): self.eff_df = SPM['xX']['erdf'] # Effective degrees of freedom self.weight = SPM['xX']['W'] # Weight matrix for whitening self.pinvX = SPM['xX']['pKX'] # Pseudo-inverse of (filtered and weighted) design matrix + self.X = SPM["xX"]["X"] self.bf = SPM['xBF']['bf'] - self.U = SPM['xX']['U'] self.Volterra = SPM['xBF']['Volterra'] + self.Sess = SPM['Sess'] + self.T = SPM["xBF"]["T"] + self.T0 = SPM["xBF"]["T0"] def relocate_file(self, fpath: str) -> str: @@ -239,26 +241,29 @@ def spmj_glm_convolve(self): Xb = np.array([]) iCs, iCc, iCb, iN = [], [], [], [] - num_basis = self.bf.shape[1] + if self.bf.ndim == 2: + num_basis = self.bf.shape[1] + else: + num_basis = 1 num_scan = self.nscans - for s in range(num_scan): + for s in range(len(self.Sess)): # Number of scans for this session - k = SPM["nscan"][s] + k = num_scan[s] # Get stimulus functions U - U = SPM["Sess"][s]["U"] + U = self.Sess[s]["U"] num_cond = len(U) # Convolve stimulus functions with basis functions X, Xn, Fc = spm_Volterra(U, self.bf, self.Volterra) # Resample regressors at acquisition times (32 bin offset) - X = X[np.arange(k) * SPM["xBF"]["T"] + SPM["xBF"]["T0"] + 32, :] + X = X[np.arange(k) * self.T + self.T0 + 32, :] # Orthogonalize within trial type for i in range(len(Fc)): - X[:, Fc[i]["i"]] = spm_orth(X[:, Fc[i]["i"]]) + X[:, Fc[i]["i"][0] - 1] = spm_orth(X[:, Fc[i]["i"][0] - 1]) # Get user-specified regressors C = SPM["Sess"][s]["C"]["C"] @@ -279,12 +284,12 @@ def spmj_glm_convolve(self): iN.extend([0] * (num_cond * num_basis) + [1] * num_reg) # Finalize design matrix - SPM["xX"]["X"] = np.hstack([Xx, Xb]) + self.X = np.hstack([Xx, Xb]) if "W" in SPM["xX"]: - SPM["xX"]["xKXs"] = spm_sp("Set", spm_filter(SPM["xX"]["K"], SPM["xX"]["W"] @ SPM["xX"]["X"])) + SPM["xX"]["xKXs"] = spm_sp("Set", self.spm_filter(self.weight @ SPM["xX"]["X"])) else: - SPM["xX"]["xKXs"] = spm_sp("Set", spm_filter(SPM["xX"]["K"], SPM["xX"]["X"])) + SPM["xX"]["xKXs"] = spm_sp("Set", self.spm_filter(SPM["xX"]["X"])) 
SPM["xX"]["xKXs"]["X"] = SPM["xX"]["xKXs"]["X"].astype(float) SPM["xX"]["pKX"] = spm_sp("x-", SPM["xX"]["xKXs"]) @@ -532,15 +537,25 @@ def spm_Volterra(U, bf, V=1): Xname = [] Fc = [] + if bf.ndim == 2: + num_basis = bf.shape[1] + else: + num_basis = 1 + # First-order terms for i, u_dict in enumerate(U): ind = [] ip = [] for k in range(u_dict['u'].shape[1]): - for p in range(bf.shape[1]): - x = u_dict['u'][:, k] - d = np.arange(len(x)) - x = np.convolve(x, bf[:, p], mode='full')[:len(d)] + for p in range(num_basis): + if num_basis > 1: + x = u_dict['u'].todense()[:, k] + d = np.arange(x.shape[0]) + x = np.convolve(x, bf[:, p], mode='full')[:len(d)] + else: + x = u_dict['u'].todense()[:, k] + d = np.arange(x.shape[0]) + x = np.convolve(x, bf, mode='full')[:len(d)] X.append(x) Xname.append(f"{u_dict['name'][k]}*bf({p + 1})") @@ -620,6 +635,6 @@ def spm_hrf(RT, P=None, T=16): return hrf, p -SPM = SpmGlm('/cifs/diedrichsen/data/SensoriMotorPrediction/smp2/glm12/subj103/') +SPM = SpmGlm('/Volumes/diedrichsen_data$/data/SensoriMotorPrediction/smp2/glm12/subj103/') SPM.get_info_from_spm_mat() - +SPM.spmj_glm_convolve() From c0cb433934459d35eee0478242f72f9bdc885dc7 Mon Sep 17 00:00:00 2001 From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com> Date: Wed, 26 Mar 2025 16:29:56 -0400 Subject: [PATCH 24/26] Added glm convolve --- nitools/spm.py | 103 +++++++++++++++++++++++++++---------------------- 1 file changed, 56 insertions(+), 47 deletions(-) diff --git a/nitools/spm.py b/nitools/spm.py index bb0ec9e..0179fcc 100644 --- a/nitools/spm.py +++ b/nitools/spm.py @@ -13,10 +13,11 @@ from os.path import normpath, dirname import numpy as np import nibabel as nb -# import nitools as nt +import nitools as nt from scipy.io import loadmat import pandas as pd from scipy.stats import gamma +from scipy.special import gammaln class SpmGlm: @@ -43,6 +44,7 @@ def get_info_from_spm_mat(self): SPM = loadmat(f"{self.path}/SPM.mat", simplify_cells=True)['SPM'] spm_file_loaded_with_scipyio = True except Exception as e: + print(e) print( f"Error loading SPM.mat file. The file was saved as mat-file version 7.3 (see https://www.mathworks.com/help/matlab/import_export/mat-file-versions.html). Try loading the mat-file with Matlab, and saving it as mat-file version 7.0 intead. Use this command: ") print(f"cp {self.path}/SPM.mat {self.path}/SPM.mat.backup") @@ -224,7 +226,7 @@ def rerun_glm(self, data): return beta[indx, :], info, data_filt, data_hat, data_adj, residuals - def spmj_glm_convolve(self): + def convolve_glm(self, bf): """ Re-convolves the SPM structure with a new basis function. @@ -237,14 +239,15 @@ def spmj_glm_convolve(self): Modified SPM structure. 
""" + self.bf = bf Xx = np.array([]) Xb = np.array([]) - iCs, iCc, iCb, iN = [], [], [], [] + # iCs, iCc, iCb, iN = [], [], [], [] - if self.bf.ndim == 2: - num_basis = self.bf.shape[1] - else: - num_basis = 1 + # if self.bf.ndim == 2: + # num_basis = self.bf.shape[1] + # else: + # num_basis = 1 num_scan = self.nscans for s in range(len(self.Sess)): @@ -266,9 +269,12 @@ def spmj_glm_convolve(self): X[:, Fc[i]["i"][0] - 1] = spm_orth(X[:, Fc[i]["i"][0] - 1]) # Get user-specified regressors - C = SPM["Sess"][s]["C"]["C"] - num_reg = C.shape[1] - X = np.hstack([X, spm_detrend(C)]) + C = self.Sess[s]["C"]["C"] + if C.size > 0: + num_reg = C.shape[1] + X = np.hstack([X, spm_detrend(C)]) + else: + num_reg = 0 # Store session info if Xx.size == 0: @@ -278,31 +284,32 @@ def spmj_glm_convolve(self): Xx = blkdiag([Xx, X]) Xb = blkdiag([Xb, np.ones((k, 1))]) - iCs.extend([s + 1] * (X.shape[1] + num_reg)) - iCc.extend(np.kron(np.arange(1, num_cond + 1), np.ones(num_basis, dtype=int)).tolist() + [0] * num_reg) - iCb.extend(np.kron(np.ones(num_cond, dtype=int), np.arange(1, num_basis + 1)).tolist() + [0] * num_reg) - iN.extend([0] * (num_cond * num_basis) + [1] * num_reg) + # iCs.extend([s + 1] * (X.shape[1] + num_reg)) + # iCc.extend(np.kron(np.arange(1, num_cond + 1), np.ones(num_basis, dtype=int)).tolist() + [0] * num_reg) + # iCb.extend(np.kron(np.ones(num_cond, dtype=int), np.arange(1, num_basis + 1)).tolist() + [0] * num_reg) + # iN.extend([0] * (num_cond * num_basis) + [1] * num_reg) # Finalize design matrix self.X = np.hstack([Xx, Xb]) - if "W" in SPM["xX"]: - SPM["xX"]["xKXs"] = spm_sp("Set", self.spm_filter(self.weight @ SPM["xX"]["X"])) + # Compute weighted and filtered design matrix + if hasattr(self, "weight"): + self.design_matrix = self.spm_filter(self.weight @ self.X) else: - SPM["xX"]["xKXs"] = spm_sp("Set", self.spm_filter(SPM["xX"]["X"])) + self.design_matrix = self.spm_filter(self.X) - SPM["xX"]["xKXs"]["X"] = SPM["xX"]["xKXs"]["X"].astype(float) - SPM["xX"]["pKX"] = spm_sp("x-", SPM["xX"]["xKXs"]) + self.design_matrix = self.design_matrix.astype(float) - # Indices for regressors - SPM["xX"]["iC"] = list(range(Xx.shape[1])) - SPM["xX"]["iB"] = list(range(Xb.shape[1])) + Xx.shape[1] - SPM["xX"]["iCs"] = iCs + list(range(1, num_scan + 1)) - SPM["xX"]["iCc"] = iCc + [0] * num_scan - SPM["xX"]["iCb"] = iCb + [0] * num_scan - SPM["xX"]["iN"] = iN + [2] * num_scan + # Compute pseudoinverse of weighted and filtered design matrix + self.pinvX = np.linalg.inv(self.design_matrix.T @ self.design_matrix) @ self.design_matrix.T - return SPM + # # Indices for regressors + # SPM["xX"]["iC"] = list(range(Xx.shape[1])) + # SPM["xX"]["iB"] = list(range(Xb.shape[1])) + Xx.shape[1] + # SPM["xX"]["iCs"] = iCs + list(range(1, num_scan + 1)) + # SPM["xX"]["iCc"] = iCc + [0] * num_scan + # SPM["xX"]["iCb"] = iCb + [0] * num_scan + # SPM["xX"]["iN"] = iN + [2] * num_scan def cut(X, pre, at, post, padding='last'): @@ -380,12 +387,10 @@ def avg_cut(X, pre, at, post, padding='last'): for a in at: y_tmp.append(cut(X, pre, a, post, padding)) - y = np.array(y_tmp).mean(axis=0) + return np.array(y_tmp) - return y - -def blkdiag(*matrices): +def blkdiag(matrices): """ Constructs a block diagonal matrix from multiple input matrices. 
@@ -400,12 +405,9 @@
     if len(matrices) == 0:
         return np.array([])

-    # Ensure all inputs are 2D arrays
-    matrices = [np.atleast_2d(m) for m in matrices]
-
     # Determine final shape
-    rows = sum(m.shape[0] for m in matrices)
-    cols = sum(m.shape[1] for m in matrices)
+    rows = np.array([m.shape[0] for m in matrices]).sum()
+    cols = np.array([m.shape[1] for m in matrices]).sum()

     # Preallocate result matrix with zeros
     y = np.zeros((rows, cols), dtype=matrices[0].dtype)
@@ -476,6 +478,8 @@ def spm_orth(X, OPT='pad'):
     # Suppress numpy floating-point warnings (closest analogue of MATLAB's warning('off','all'))
     np.seterr(all='ignore')

+    if X.ndim == 1:
+        X = X[:, np.newaxis]
     n, m = X.shape
     X = X[:, np.any(X, axis=0)]  # Remove zero columns
     rankX = np.linalg.matrix_rank(X)
@@ -510,6 +514,9 @@ def spm_orth(X, OPT='pad'):
     else:
         X_out = x

+    # Drop singleton dimensions, so a vector input returns a vector
+    X_out = np.squeeze(X_out)
+
     return X_out
@@ -555,7 +562,9 @@ def spm_Volterra(U, bf, V=1):
             else:
                 x = u_dict['u'].todense()[:, k]
                 d = np.arange(x.shape[0])
+                x = np.asarray(x).flatten()
                 x = np.convolve(x, bf, mode='full')[:len(d)]
+                x = x[:, None]
                 X.append(x)
@@ -598,7 +607,7 @@ def spm_hrf(RT, P=None, T=16):

     Parameters:
     RT : float
-        Scan repeat time (TR)
+        Scan repeat time in seconds (TR)
     P : list or ndarray, optional
         Parameters of the response function (two Gamma functions), defaults to [6, 16, 1, 1, 6, 0, 32]
     T : int, optional
@@ -619,22 +628,22 @@ def spm_hrf(RT, P=None, T=16):
     p[:len(P)] = P

     # Microtime resolution
+    RT = RT / T  # oversample: the function now works on the microtime grid
     dt = RT / T
-    u = np.arange(0, np.ceil(p[6] / dt) + 1) - p[5] / dt
+    t = np.linspace(0, p[6], np.ceil(1 + p[6] / dt).astype(int)) - p[5]
+
+    peak = (t ** p[0]) * np.exp(-t / p[2])
+    peak /= np.max(peak)  # Normalize
+
+    undershoot = (t ** p[1]) * np.exp(-t / p[3])
+    undershoot /= np.max(undershoot)  # Normalize

-    # Modelled hemodynamic response function (mixture of two gamma distributions)
-    hrf = (gamma.pdf(u, p[0] / p[2], scale=p[2] / dt) -
-           gamma.pdf(u, p[1] / p[3], scale=p[3] / dt) / p[4])
+    hrf = peak - (undershoot / p[4])

-    # Downsample to TR resolution
-    hrf = hrf[np.floor(np.arange(0, p[6] / RT + 1) * T).astype(int)]
+    # Resample onto the microtime grid (the returned HRF is sampled at RT/T of the original TR)
+    hrf = hrf[np.floor(np.arange(0, p[6] / RT) * T).astype(int)]

     # Normalize HRF
     hrf = hrf / np.sum(hrf)

     return hrf, p
-
-
-SPM = SpmGlm('/Volumes/diedrichsen_data$/data/SensoriMotorPrediction/smp2/glm12/subj103/')
-SPM.get_info_from_spm_mat()
-SPM.spmj_glm_convolve()
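
After this patch, spm_hrf returns the response sampled on the microtime grid (spacing RT/T) rather than once per TR, which is what convolve_glm needs when convolving the microtime stick functions. A rough check of that reading, with an invented TR of 1 s:

    import numpy as np

    hrf, p = spm_hrf(1.0)           # T = 16 microtime bins per TR
    dt_out = 1.0 / 16               # sampling step of the returned HRF under this reading
    print(len(hrf))                 # 512 samples covering the 32 s window
    print(np.argmax(hrf) * dt_out)  # 6.0 s, the peak of t**6 * exp(-t) for the defaults
    print(hrf.min() < 0)            # True, the undershoot dips below zero
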
""" self.bf = bf @@ -259,20 +255,20 @@ def convolve_glm(self, bf): num_cond = len(U) # Convolve stimulus functions with basis functions - X, Xn, Fc = spm_Volterra(U, self.bf, self.Volterra) + X, Xn, Fc = _spm_Volterra(U, self.bf, self.Volterra) # Resample regressors at acquisition times (32 bin offset) X = X[np.arange(k) * self.T + self.T0 + 32, :] # Orthogonalize within trial type for i in range(len(Fc)): - X[:, Fc[i]["i"][0] - 1] = spm_orth(X[:, Fc[i]["i"][0] - 1]) + X[:, Fc[i]["i"][0] - 1] = _spm_orth(X[:, Fc[i]["i"][0] - 1]) # Get user-specified regressors C = self.Sess[s]["C"]["C"] if C.size > 0: num_reg = C.shape[1] - X = np.hstack([X, spm_detrend(C)]) + X = np.hstack([X, _spm_detrend(C)]) else: num_reg = 0 @@ -281,8 +277,8 @@ def convolve_glm(self, bf): Xx = X Xb = np.ones((k, 1)) else: - Xx = blkdiag([Xx, X]) - Xb = blkdiag([Xb, np.ones((k, 1))]) + Xx = _blkdiag([Xx, X]) + Xb = _blkdiag([Xb, np.ones((k, 1))]) # iCs.extend([s + 1] * (X.shape[1] + num_reg)) # iCc.extend(np.kron(np.arange(1, num_cond + 1), np.ones(num_basis, dtype=int)).tolist() + [0] * num_reg) @@ -311,6 +307,38 @@ def convolve_glm(self, bf): # SPM["xX"]["iCb"] = iCb + [0] * num_scan # SPM["xX"]["iN"] = iN + [2] * num_scan + def update_hrf_params(self, P, mask_img): + """ + Updates the HRF parameters of the SPM structure and return the timeseries calculated with the new HRF parameters. + Args: + P (ndarray): HRF parameters (SPM12 default: [6, 16, 1, 1, 6, 0, 32]) + mask_img (str or Nifti1Image): mask image (e.g., ROI mask) + Returns: + data_filt (ndarray): 2d array of filtered time series data (TxP) + data_hat (ndarray): 2d array of predicted time series data (TxP) - + This is predicted only using regressors of interest (without the constant or other nuisance regressors) + data_adj (ndarray): 2d array of adjusted time series data (TxP) + This is filtered timeseries with constants and other nuisance regressors substrated out + residuals (ndarray): 2d array of residuals (TxP) + """ + if isinstance(mask_img, str): + mask_img = nb.load(mask_img) + if isinstance(mask_img, nb.Nifti1Image): + coords = nt.get_mask_coords(mask_img) + else: + raise TypeError("mask_img must be a nifti1 image or a string") + + hrf, p = spm_hrf(1, P) + SPM.convolve_glm(hrf) + + # get raw time series in roi + y_raw = nt.sample_images(self.rawdata_files, coords) + + # rerun glm + _, info, data_filt, data_hat, data_adj, residuals = self.rerun_glm(y_raw) + + return data_filt, data_hat, data_adj, residuals + def cut(X, pre, at, post, padding='last'): """ @@ -379,7 +407,7 @@ def avg_cut(X, pre, at, post, padding='last'): Returns: y (np.array): - Signal cut and averaged around locations at. + Signal cut around locations at. """ @@ -390,12 +418,12 @@ def avg_cut(X, pre, at, post, padding='last'): return np.array(y_tmp) -def blkdiag(matrices): +def _blkdiag(matrices): """ Constructs a block diagonal matrix from multiple input matrices. Parameters: - *matrices : list of ndarray + matrices : list of ndarray Matrices to be placed on the block diagonal. Returns: @@ -423,7 +451,7 @@ def blkdiag(matrices): return y -def spm_detrend(x, p=0): +def _spm_detrend(x, p=0): """ Polynomial detrending over columns. 
@@ -460,7 +488,7 @@

-def spm_orth(X, OPT='pad'):
+def _spm_orth(X, OPT='pad'):
     """
     Recursive Gram-Schmidt orthogonalisation of basis functions

@@ -520,7 +548,7 @@

-def spm_Volterra(U, bf, V=1):
+def _spm_Volterra(U, bf, V=1):
     """
     Generalized convolution of inputs (U) with basis set (bf)


From b58876b62dbfa89e9c213ac4bc923819e06d6a01 Mon Sep 17 00:00:00 2001
From: Marco Emanuele <97095997+mnlmrc@users.noreply.github.com>
Date: Thu, 27 Mar 2025 16:29:32 -0400
Subject: [PATCH 26/26] Update spm.py

---
 nitools/spm.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/nitools/spm.py b/nitools/spm.py
index 39cc3d7..63f0715 100644
--- a/nitools/spm.py
+++ b/nitools/spm.py
@@ -329,18 +329,18 @@ def update_hrf_params(self, P, mask_img):
             raise TypeError("mask_img must be a nifti1 image or a string")

         hrf, p = spm_hrf(1, P)
-        SPM.convolve_glm(hrf)
+        self.convolve_glm(hrf)

         # get raw time series in roi
-        y_raw = nt.sample_images(self.rawdata_files, coords)
+        data = nt.sample_images(self.rawdata_files, coords)

         # rerun glm
-        _, info, data_filt, data_hat, data_adj, residuals = self.rerun_glm(y_raw)
+        _, info, data_filt, data_hat, data_adj, residuals = self.rerun_glm(data)

         return data_filt, data_hat, data_adj, residuals

-def cut(X, pre, at, post, padding='last'):
+def _cut(X, pre, at, post, padding='last'):
     """
     Cut segment from signal X.

@@ -381,7 +381,7 @@
-def avg_cut(X, pre, at, post, padding='last'):
+def cut(X, pre, at, post, padding='last'):
     """
-    Takes a vector of sample locations (at) and returns the signal (X) aligned and averaged around those locations
+    Takes a vector of sample locations (at) and returns the signal (X) aligned around those locations
     Args:
@@ -413,7 +413,7 @@ def cut(X, pre, at, post, padding='last'):
     y_tmp = []

     for a in at:
-        y_tmp.append(cut(X, pre, a, post, padding))
+        y_tmp.append(_cut(X, pre, a, post, padding))

     return np.array(y_tmp)

@@ -669,7 +669,9 @@ def spm_hrf(RT, P=None, T=16):
     hrf = peak - (undershoot / p[4])

     # Resample onto the microtime grid (the returned HRF is sampled at RT/T of the original TR)
-    hrf = hrf[np.floor(np.arange(0, p[6] / RT) * T).astype(int)]
+    n_samples = int(p[6] / RT) + 1
+    indices = np.round(np.linspace(0, T * n_samples, n_samples, endpoint=False)).astype(int)
+    hrf = hrf[indices]

     # Normalize HRF
     hrf = hrf / np.sum(hrf)
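
A quick check of the final resampling scheme above, mirroring the arithmetic inside spm_hrf (values invented): the index grid is even and the last index stays within the oversampled array:

    import numpy as np

    RT, T, p6 = 1.0 / 16, 16, 32.0          # RT as rescaled inside spm_hrf for a 1 s TR
    n_samples = int(p6 / RT) + 1            # 513 samples across the 32 s window
    indices = np.round(np.linspace(0, T * n_samples, n_samples, endpoint=False)).astype(int)
    print(indices[:3], indices[-1])         # [ 0 16 32] 8192
    print(int(np.ceil(1 + p6 / (RT / T))))  # 8193 oversampled points, so 8192 is in range
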