functional_preproc:
run: On
truncation:
# First timepoint to include in analysis.
# Default is 0 (beginning of timeseries).
# First timepoint selection in the scan parameters in the data configuration file, if present, will override this selection.
# Note: the selection here applies to all scans of all participants.
start_tr: 0
# Last timepoint to include in analysis.
# Default is None or End (end of timeseries).
# Last timepoint selection in the scan parameters in the data configuration file, if present, will override this selection.
# Note: the selection here applies to all scans of all participants.
stop_tr: None
scaling:
# Scale functional raw data, usually used in rodent pipeline
run: Off
# Scale the size of the dataset voxels by the factor.
scaling_factor: 10
despiking:
# Run AFNI 3dDespike
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [Off]
slice_timing_correction:
# Interpolate voxel time courses so they are sampled at the same time points.
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# use specified slice time pattern rather than one in header
tpattern: None
# align each slice to given time offset
# The default alignment time is the average of the 'tpattern' values (either from the dataset header or from the tpattern option).
tzero: None
motion_estimates_and_correction:
motion_estimates:
# calculate motion statistics BEFORE slice-timing correction
calculate_motion_first: Off
# calculate motion statistics AFTER motion correction
calculate_motion_after: On
motion_correction:
# using: ['3dvolreg', 'mcflirt']
# this is a fork point
using: ['3dvolreg']
# option parameters
AFNI-3dvolreg:
# This option is useful when aligning high-resolution datasets that may need more alignment than a few voxels.
functional_volreg_twopass: On
# Choose motion correction reference. Options: mean, median, selected_volume, fmriprep_reference
motion_correction_reference: ['mean']
# Choose motion correction reference volume
motion_correction_reference_volume: 0
motion_estimate_filter:
# Filter physiological (respiration) artifacts from the head motion estimates.
# Adapted from DCAN Labs filter.
# https://www.ohsu.edu/school-of-medicine/developmental-cognition-and-neuroimaging-lab
# https://www.biorxiv.org/content/10.1101/337360v1.full.pdf
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [Off]
# options: "notch", "lowpass"
filter_type: "notch"
# Number of filter coefficients.
filter_order: 4
# Dataset-wide respiratory rate data from breathing belt.
# Notch filter requires either:
# "breathing_rate_min" and "breathing_rate_max"
# or
# "center_frequency" and "filter_bandwidth".
# Lowpass filter requires either:
# "breathing_rate_min"
# or
# "lowpass_cutoff".
# If "breathing_rate_min" (for lowpass and notch filter)
# and "breathing_rate_max" (for notch filter) are set,
# the values set in "lowpass_cutoff" (for lowpass filter),
# "center_frequency" and "filter_bandwidth" (for notch filter)
# options are ignored.
# Lowest Breaths-Per-Minute in dataset.
# For both notch and lowpass filters.
breathing_rate_min:
# Highest Breaths-Per-Minute in dataset.
# For notch filter.
breathing_rate_max:
# notch filter direct customization parameters
# mutually exclusive with breathing_rate options above.
# If breathing_rate_min and breathing_rate_max are provided,
# the following parameters will be ignored.
# the center frequency of the notch filter
center_frequency:
# the width of the notch filter
filter_bandwidth:
# lowpass filter direct customization parameter
# mutually exclusive with breathing_rate options above.
# If breathing_rate_min is provided, the following
# parameter will be ignored.
# the frequency cutoff of the filter
lowpass_cutoff:
distortion_correction:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# using: ['PhaseDiff', 'Blip']
# PhaseDiff - Perform field map correction using a single phase difference image, a subtraction of the two phase images from each echo. Default scanner for this method is SIEMENS.
# Blip - Uses AFNI 3dQWarp to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.
# NOTE:
# this is NOT a fork point - instead, the technique used will depend on what type of distortion correction field data accompanies the dataset
# for example, phase-difference field maps will lead to phase-difference distortion correction, and phase-encoding direction field maps will lead to blip-up/blip-down
using: ['PhaseDiff', 'Blip']
# option parameters
PhaseDiff:
# Since the quality of the distortion heavily relies on the skull-stripping step, we provide a choice of method ('AFNI' for AFNI 3dSkullStrip or 'BET' for FSL BET).
# Options: 'BET' or 'AFNI'
fmap_skullstrip_option: 'BET'
# Set the fraction value for the skull-stripping of the magnitude file. Depending on the data, a tighter extraction may be necessary in order to prevent noisy voxels from interfering with preparing the field map.
# The default value is 0.5.
fmap_skullstrip_BET_frac: 0.5
# Set the threshold value for the skull-stripping of the magnitude file. Depending on the data, a tighter extraction may be necessary in order to prevent noisy voxels from interfering with preparing the field map.
# The default value is 0.6.
fmap_skullstrip_AFNI_threshold: 0.6
func_masking:
# using: ['AFNI', 'FSL', 'FSL_AFNI', 'Anatomical_Refined', 'Anatomical_Based', 'Anatomical_Resampled', 'CCS_Anatomical_Refined']
# this is a fork point
using: ['AFNI']
FSL-BET:
# Apply to 4D FMRI data, if bold_bet_functional_mean_boolean : Off.
# Mutually exclusive with functional, reduce_bias, robust, padding, remove_eyes, surfaces
# It must be On if 'reduce_bias', 'robust', 'padding', 'remove_eyes', or 'bet_surfaces' is selected
functional_mean_boolean: Off
# Set an intensity threshold to improve skull stripping performances of FSL BET on rodent scans.
functional_mean_thr:
run: Off
threshold_value: 98
# Bias correct the functional mean image to improve skull stripping performances of FSL BET on rodent scans
functional_mean_bias_correction: Off
# Set the threshold value controlling the brain vs non-brain voxels.
frac: 0.3
# Mesh created along with skull stripping
mesh_boolean: Off
# Create a surface outline image
outline: Off
# Add padding to the end of the image, improving BET. Mutually exclusive with functional, reduce_bias, robust, padding, remove_eyes, surfaces
padding: Off
# Integer value of head radius
radius: 0
# Reduce bias and cleanup neck. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces
reduce_bias: Off
# Eyes and optic nerve cleanup. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces
remove_eyes: Off
# Robust brain center estimation. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces
robust: Off
# Create a skull image
skull: Off
# Gets additional skull and scalp surfaces by running bet2 and betsurf. This is mutually exclusive with reduce_bias, robust, padding, remove_eyes
surfaces: Off
# Apply thresholding to segmented brain image and mask
threshold: Off
# Vertical gradient in fractional intensity threshold (-1,1)
vertical_gradient: 0.0
FSL_AFNI:
bold_ref: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz
brain_mask: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_desc-brain_mask.nii.gz
brain_probseg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz
Anatomical_Refined:
# Choose whether or not to dilate the anatomical mask if you choose 'Anatomical_Refined' as the functional masking option. It will dilate one voxel if enabled.
anatomical_mask_dilation: False
# Apply functional mask in native space
apply_func_mask_in_native_space: On
generate_func_mean:
# Generate mean functional image
run: On
normalize_func:
# Normalize functional image
run: On
nuisance_corrections:
1-ICA-AROMA:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [Off]
# Types of denoising strategy:
# nonaggr: nonaggressive-partial component regression
# aggr: aggressive denoising
denoising_type: nonaggr
2-nuisance_regression:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# switch to Off if nuisance regression is off and you don't want to write out the regressors
create_regressors: On
# Select which nuisance signal corrections to apply
Regressors:
- Name: 'default'
Motion:
include_delayed: true
include_squared: true
include_delayed_squared: true
aCompCor:
summary:
method: DetrendPC
components: 5
tissues:
- WhiteMatter
- CerebrospinalFluid
extraction_resolution: 2
CerebrospinalFluid:
summary: Mean
extraction_resolution: 2
erode_mask: true
GlobalSignal:
summary: Mean
PolyOrt:
degree: 2
Bandpass:
bottom_frequency: 0.01
top_frequency: 0.1
method: default
- Name: 'defaultNoGSR'
Motion:
include_delayed: true
include_squared: true
include_delayed_squared: true
aCompCor:
summary:
method: DetrendPC
components: 5
tissues:
- WhiteMatter
- CerebrospinalFluid
extraction_resolution: 2
CerebrospinalFluid:
summary: Mean
extraction_resolution: 2
erode_mask: true
PolyOrt:
degree: 2
Bandpass:
bottom_frequency: 0.01
top_frequency: 0.1
method: default
# Standard Lateral Ventricles Binary Mask
# used in CSF mask refinement for CSF signal-related regressions
lateral_ventricles_mask: $FSLDIR/data/atlases/HarvardOxford/HarvardOxford-lateral-ventricles-thr25-2mm.nii.gz
# Whether to run frequency filtering before or after nuisance regression.
# Options: 'After' or 'Before'
bandpass_filtering_order: 'After'
# Process and refine masks used to produce regressors and time series for
# regression.
regressor_masks:
erode_anatomical_brain_mask:
# Erode binarized anatomical brain mask. If choosing True, please also set seg_csf_use_erosion: True; regOption: niworkflows-ants.
run: Off
# Erosion proportion, if using erosion.
# Default proportion is 0 for anatomical brain mask.
# It is recommended not to use both the proportion and millimeter erosion methods.
brain_mask_erosion_prop : 0
# Erode brain mask in millimeter, default of brain is 30 mm
# brain erosion default is using millimeter erosion method when use erosion for brain.
brain_mask_erosion_mm : 30
# Erode binarized brain mask in millimeter
brain_erosion_mm: 0
erode_csf:
# Erode binarized csf tissue mask.
run: Off
# Erosion proportion, if use erosion.
# Default proportion is 0 for CSF (cerebrospinal fluid) mask.
# It is recommended not to use both the proportion and millimeter erosion methods.
csf_erosion_prop : 0
# Erode brain mask in millimeter, default of csf is 30 mm
# CSF erosion default is using millimeter erosion method when use erosion for CSF.
csf_mask_erosion_mm: 30
# Erode binarized CSF (cerebrospinal fluid) mask in millimeter
csf_erosion_mm: 0
erode_wm:
# Erode WM binarized tissue mask.
run: Off
# Erosion proportion, if use erosion.
# Default proportion is 0.6 for White Matter mask.
# It is recommended not to use both the proportion and millimeter erosion methods.
# White Matter erosion default is using proportion erosion method when use erosion for White Matter.
wm_erosion_prop : 0.6
# Erode brain mask in millimeter, default of White Matter is 0 mm
wm_mask_erosion_mm: 0
# Erode binarized White Matter mask in millimeter
wm_erosion_mm: 0
erode_gm:
# Erode GM binarized tissue mask.
run: Off
# Erosion proportion, if use erosion.
# It is recommended not to use both the proportion and millimeter erosion methods.
gm_erosion_prop : 0.6
# Erode gray matter mask in millimeters; the default for gray matter is 30 mm
gm_mask_erosion_mm: 30
# Erode binarized gray matter mask in millimeters
gm_erosion_mm: 0
# OUTPUTS AND DERIVATIVES
# -----------------------
post_processing:
spatial_smoothing:
# Smooth the derivative outputs.
# Set as ['nonsmoothed'] to disable smoothing. Set as both to get both.
#
# Options:
# ['smoothed', 'nonsmoothed']
output: ['smoothed']
# Tool to use for smoothing.
# 'FSL' for FSL MultiImageMaths for FWHM provided
# 'AFNI' for AFNI 3dBlurToFWHM for FWHM provided
smoothing_method: ['FSL']
# Full Width at Half Maximum of the Gaussian kernel used during spatial smoothing.
# this is a fork point
# i.e. multiple kernels - fwhm: [4,6,8]
fwhm: [4]
z-scoring:
# z-score standardize the derivatives. This may be needed for group-level analysis.
# Set as ['raw'] to disable z-scoring. Set as both to get both.
#
# Options:
# ['z-scored', 'raw']
output: ['z-scored']
timeseries_extraction:
run: On
# Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for time-series extraction, and then select which types of analyses to run.
# Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and SpatialReg, you would enter: '/path/to/ROI.nii.gz': Avg, SpatialReg
# available analyses:
# /path/to/atlas.nii.gz: Avg, Voxel, SpatialReg, PearsonCorr, PartialCorr
tse_roi_paths:
/cpac_templates/CC400.nii.gz: Avg
/cpac_templates/aal_mask_pad.nii.gz: Avg
/cpac_templates/CC200.nii.gz: Avg
/cpac_templates/tt_mask_pad.nii.gz: Avg
/cpac_templates/PNAS_Smith09_rsn10.nii.gz: SpatialReg
/cpac_templates/ho_mask_pad.nii.gz: Avg
/cpac_templates/rois_3mm.nii.gz: Avg
/ndmg_atlases/label/Human/AAL_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/CAPRSC_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/DKT_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/DesikanKlein_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/HarvardOxfordcort-maxprob-thr25_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/HarvardOxfordsub-maxprob-thr25_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Juelich_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/MICCAI_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Schaefer1000_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Schaefer200_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Schaefer300_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Schaefer400_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Talairach_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Brodmann_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Desikan_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Glasser_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Slab907_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Yeo-17-liberal_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Yeo-17_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Yeo-7-liberal_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
/ndmg_atlases/label/Human/Yeo-7_space-MNI152NLin6_res-1x1x1.nii.gz: Avg
# Functional time-series and ROI realignment method: ['ROI_to_func'] or ['func_to_ROI']
# 'ROI_to_func' will realign the atlas/ROI to functional space (fast)
# 'func_to_ROI' will realign the functional time series to the atlas/ROI space
#
# NOTE: in rare cases, realigning the ROI to the functional space may
# result in small misalignments for very small ROIs - please double
# check your data if you see issues
realignment: 'ROI_to_func'
seed_based_correlation_analysis:
# SCA - Seed-Based Correlation Analysis
# For each extracted ROI Average time series, CPAC will generate a whole-brain correlation map.
# It should be noted that for a given seed/ROI, SCA maps for ROI Average time series will be the same.
run: Off
# Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for seed-based correlation analysis, and then select which types of analyses to run.
# Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and MultReg, you would enter: '/path/to/ROI.nii.gz': Avg, MultReg
# available analyses:
# /path/to/atlas.nii.gz: Avg, DualReg, MultReg
sca_roi_paths:
/cpac_templates/PNAS_Smith09_rsn10.nii.gz: DualReg
/cpac_templates/CC400.nii.gz: Avg, MultReg
/cpac_templates/ez_mask_pad.nii.gz: Avg, MultReg
/cpac_templates/aal_mask_pad.nii.gz: Avg, MultReg
/cpac_templates/CC200.nii.gz: Avg, MultReg
/cpac_templates/tt_mask_pad.nii.gz: Avg, MultReg
/cpac_templates/ho_mask_pad.nii.gz: Avg, MultReg
/cpac_templates/rois_3mm.nii.gz: Avg, MultReg
# Normalize each time series before running Dual Regression SCA.
norm_timeseries_for_DR: True
amplitude_low_frequency_fluctuation:
# ALFF & f/ALFF
# Calculate Amplitude of Low Frequency Fluctuations (ALFF) and fractional ALFF (f/ALFF) for all voxels.
run: On
# Frequency cutoff (in Hz) for the high-pass filter used when calculating f/ALFF.
highpass_cutoff: [0.01]
# Frequency cutoff (in Hz) for the low-pass filter used when calculating f/ALFF
lowpass_cutoff: [0.1]
regional_homogeneity:
# ReHo
# Calculate Regional Homogeneity (ReHo) for all voxels.
run: On
# Number of neighboring voxels used when calculating ReHo
# 7 (Faces)
# 19 (Faces + Edges)
# 27 (Faces + Edges + Corners)
cluster_size: 27
voxel_mirrored_homotopic_connectivity:
# VMHC
# Calculate Voxel-mirrored Homotopic Connectivity (VMHC) for all voxels.
run: On
symmetric_registration:
# Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.
# It is not necessary to change this path unless you intend to use a non-standard symmetric template.
T1w_brain_template_symmetric: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain_symmetric.nii.gz
# A reference symmetric brain template for resampling
T1w_brain_template_symmetric_for_resample: $FSLDIR/data/standard/MNI152_T1_1mm_brain_symmetric.nii.gz
# Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.
# It is not necessary to change this path unless you intend to use a non-standard symmetric template.
T1w_template_symmetric: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_symmetric.nii.gz
# A reference symmetric skull template for resampling
T1w_template_symmetric_for_resample: $FSLDIR/data/standard/MNI152_T1_1mm_symmetric.nii.gz
# Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.
# It is not necessary to change this path unless you intend to use a non-standard symmetric template.
dilated_symmetric_brain_mask: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain_mask_symmetric_dil.nii.gz
# A reference symmetric brain mask template for resampling
dilated_symmetric_brain_mask_for_resample: $FSLDIR/data/standard/MNI152_T1_1mm_brain_mask_symmetric_dil.nii.gz
network_centrality:
# Calculate Degree, Eigenvector Centrality, or Functional Connectivity Density.
run: On
# Maximum amount of RAM (in GB) to be used when calculating Degree Centrality.
# Calculating Eigenvector Centrality will require additional memory based on the size of the mask or number of ROI nodes.
memory_allocation: 1.0
# Full path to a NIFTI file describing the mask. Centrality will be calculated for all voxels within the mask.
template_specification_file: /cpac_templates/Mask_ABIDE_85Percent_GM.nii.gz
degree_centrality:
# Enable/Disable degree centrality by selecting the connectivity weights
# weight_options: ['Binarized', 'Weighted']
# disable this type of centrality with:
# weight_options: []
weight_options: ['Binarized', 'Weighted']
# Select the type of threshold used when creating the degree centrality adjacency matrix.
# options:
# 'Significance threshold', 'Sparsity threshold', 'Correlation threshold'
correlation_threshold_option: 'Sparsity threshold'
# Based on the Threshold Type selected above, enter a Threshold Value.
# P-value for Significance Threshold
# Sparsity value for Sparsity Threshold
# Pearson's r value for Correlation Threshold
correlation_threshold: 0.001
eigenvector_centrality:
# Enable/Disable eigenvector centrality by selecting the connectivity weights
# weight_options: ['Binarized', 'Weighted']
# disable this type of centrality with:
# weight_options: []
weight_options: ['Weighted']
# Select the type of threshold used when creating the eigenvector centrality adjacency matrix.
# options:
# 'Significance threshold', 'Sparsity threshold', 'Correlation threshold'
correlation_threshold_option: 'Sparsity threshold'
# Based on the Threshold Type selected above, enter a Threshold Value.
# P-value for Significance Threshold
# Sparsity value for Sparsity Threshold
# Pearson's r value for Correlation Threshold
correlation_threshold: 0.001
local_functional_connectivity_density:
# Enable/Disable lFCD by selecting the connectivity weights
# weight_options: ['Binarized', 'Weighted']
# disable this type of centrality with:
# weight_options: []
weight_options: ['Binarized', 'Weighted']
# Select the type of threshold used when creating the lFCD adjacency matrix.
# options:
# 'Significance threshold', 'Correlation threshold'
correlation_threshold_option: 'Correlation threshold'
# Based on the Threshold Type selected above, enter a Threshold Value.
# P-value for Significance Threshold
# Sparsity value for Sparsity Threshold
# Pearson's r value for Correlation Threshold
correlation_threshold: 0.6
I think this has something to do with the value for csf_erosion_prop. In your config it’s set to 0, but typical values range from 0.7 to 0.9. Are you looking for a more stringent CSF mask?
I’m sorry for replying so late. My goal is to extract signals and complete preprocessing for the corresponding individuals; the value of csf_erosion_prop is secondary for me. Therefore, following your advice, I changed csf_erosion_prop to 0.7. However, the results obtained are quite similar to when csf_erosion_prop is set to 0, and the error related to the ‘number of time points’ still persists.
The processing results are shown in the screenshot below. The white is the old result (csf_erosion_prop set to 0), and the red overlay is the new result (csf_erosion_prop set to 0.7).
Hi, sorry for the delay. I am going to try and replicate this error so that I can look into the working directory. What version of CPAC are you using?
Hi, the version of C-PAC that I am using is 1.8.1.
I reproduced the error on the same dataset, and it looks like the priors are causing this problem. Did you specifically want to use priors?
It would be better to apply the same preprocessing configurations to all the subjects, but if not using priors can lead to a reasonable output, I think not using priors is cool, I will try it on the data to see if there are any further questions, thanks for your help!
By the way, do I just need to change use_priors run: On to use_priors run: Off, or do I need to make more changes?
Yes, that’s all you have to change!
Hi, sorry for the late reply. I followed your advice and subject-29576 can be preprocessed perfectly; however, when I apply the same pipeline to other subjects, although the CSF file is alright, the ‘ValueError: Number of time points’ error still remains.
Below is the CSF mask result:
And here is the log files:
pypeline.txt (361.7 KB)
29542_pypeline.txt (362.5 KB)
29576_pypeline.txt (809.7 KB)
29577_pypeline.txt (362.3 KB)
Could you please help me to find out which part goes wrong?
Thank you for your time!
After running some tests, it looks like the lateral ventricle mask may be too stringent for some of the subjects, which is why this error appears to be data dependent. The error disappeared when I ran this pipeline without the lateral mask. Omitting the mask would be a less conservative approach to extracting the CSF mask, but it seems like it isn’t compatible with the dataset. You can just replace the path to the mask with None. Let me know if you have any questions!
Hi @ekenneally,
I’m trying to do the same thing: extracting time series using CPAC as the processing pipeline for ABIDE 2. I used the default pipeline and configured it to produce the same time series for the atlases I have in ABIDE 1. Previously, to ensure my configuration file was correct, I tested it on ABIDE 1. However, when I compared my time series with those from ABIDE 1, I found that they were not the same—not even approximately.
What can I do to generate the same results?
Hi @ichkifa, thanks for reaching out! Are you able to share your configuration file so I can look into this further?
%YAML 1.1
---
pipeline_setup:
pipeline_name: cpacImanitaFinal1
output_directory:
path: /outputs/output
source_outputs_dir: None
pull_source_once: True
write_func_outputs: True
write_debugging_outputs: True
output_tree: "default"
# Quality control outputs
quality_control:
# Generate quality control pages containing preprocessing and derivative outputs.
generate_quality_control_images: False
generate_xcpqc_files: False
working_directory:
path: /work
remove_working_dir: True
log_directory:
run_logging: True
path: /outputs/log
graphviz:
# Configuration for a graphviz visualization of the entire workflow. See https://fcp-indi.github.io/docs/developer/nodes#CPAC.pipeline.nipype_pipeline_engine.Workflow.write_graph for details about the various options
entire_workflow:
# Whether to generate the graph visualization
generate: Off
# Options: [orig, hierarchical, flat, exec, colored]
graph2use: []
# Options: [svg, png]
format: []
# The node name will be displayed in the form `nodename (package)` when On or `nodename.Class.package` when Off
simple_form: On
crash_log_directory:
path: /outputs/crash
system_config:
fail_fast: Off
random_seed:
# The maximum amount of memory each participant's workflow can allocate.
# Use this to place an upper bound of memory usage.
# - Warning: 'Memory Per Participant' multiplied by 'Number of Participants to Run Simultaneously'
# must not be more than the total amount of RAM.
# - Conversely, using too little RAM can impede the speed of a pipeline run.
# - It is recommended that you set this to a value that when multiplied by
# 'Number of Participants to Run Simultaneously' is as much RAM you can safely allocate.
maximum_memory_per_participant: 1
# Prior to running a pipeline C-PAC makes a rough estimate of a worst-case-scenario maximum concurrent memory usage with high-resolution data, raising an exception describing the recommended minimum memory allocation for the given configuration.
# Turning this option off will allow pipelines to run without allocating the recommended minimum, allowing for more efficient runs at the risk of out-of-memory crashes (use at your own risk)
raise_insufficient: On
# A callback.log file from a previous run can be provided to estimate memory usage based on that run.
observed_usage:
# Path to callback log file with previously observed usage.
# Can be overridden with the commandline flag `--runtime_usage`.
callback_log:
# Percent. E.g., `buffer: 10` would estimate 1.1 * the observed memory usage from the callback log provided in "usage".
# Can be overridden with the commandline flag `--runtime_buffer`.
buffer: 10
max_cores_per_participant: 1
num_ants_threads: 1
num_OMP_threads: 1
num_participants_at_once: 1
FSLDIR: FSLDIR
Amazon-AWS:
# If setting the 'Output Directory' to an S3 bucket, insert the path to your AWS credentials file here.
aws_output_bucket_credentials:
# Enable server-side 256-AES encryption on data to the S3 bucket
s3_encryption: False
Debugging:
# Verbose developer messages.
verbose: On
##################################################
anatomical_preproc:
run: On
brain_extraction:
run: On
using: ['3dSkullStrip']
functional_preproc:
run: On
truncation:
start_tr: 0
stop_tr: None
func_masking:
run: On
using: ['AFNI']
slice_timing_correction:
# Interpolate voxel time courses so they are sampled at the same time points.
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# use specified slice time pattern rather than one in header
tpattern: None
motion_estimates_and_correction:
run: On
motion_estimates:
# calculate motion statistics BEFORE slice-timing correction
calculate_motion_first: Off
# calculate motion statistics AFTER motion correction
calculate_motion_after: On
motion_correction:
# using: ['3dvolreg', 'mcflirt']
# Forking is currently broken for this option.
# Please use separate configs if you want to use each of 3dvolreg and mcflirt.
# Follow https://github.com/FCP-INDI/C-PAC/issues/1935 to see when this issue is resolved.
using: ['3dvolreg']
# option parameters
AFNI-3dvolreg:
# This option is useful when aligning high-resolution datasets that may need more alignment than a few voxels.
functional_volreg_twopass: On
# Choose motion correction reference. Options: mean, median, selected_volume, fmriprep_reference
motion_correction_reference: ['mean']
# Choose motion correction reference volume
motion_correction_reference_volume: 0
generate_func_mean:
# Generate mean functional image
run: On
normalize_func:
# Normalize functional image
run: On
coreg_prep:
# Generate sbref
run: On
segmentation:
# Automatically segment anatomical images into white matter, gray matter,
# and CSF based on prior probability maps.
run: On
tissue_segmentation:
# using: ['FSL-FAST', 'Template_Based', 'ANTs_Prior_Based', 'FreeSurfer']
# this is a fork point
using: ['FSL-FAST']
# option parameters
FSL-FAST:
thresholding:
# thresholding of the tissue segmentation probability maps
# options: 'Auto', 'Custom'
use: 'Auto'
Custom:
# Set the threshold value for the segmentation probability masks (CSF, White Matter, and Gray Matter)
# The values remaining will become the binary tissue masks.
# A good starting point is 0.95.
# CSF (cerebrospinal fluid) threshold.
CSF_threshold_value : 0.96
# White matter threshold.
WM_threshold_value : 0.96
# Gray matter threshold.
GM_threshold_value : 0.7
use_priors:
# Use template-space tissue priors to refine the binary tissue masks generated by segmentation.
run: On
# Full path to a directory containing binarized prior probability maps.
# These maps are included as part of the 'Image Resource Files' package available on the Install page of the User Guide.
# It is not necessary to change this path unless you intend to use non-standard priors.
priors_path: $FSLDIR/data/standard/tissuepriors/2mm
# Full path to a binarized White Matter prior probability map.
# It is not necessary to change this path unless you intend to use non-standard priors.
WM_path: $priors_path/avg152T1_white_bin.nii.gz
# Full path to a binarized Gray Matter prior probability map.
# It is not necessary to change this path unless you intend to use non-standard priors.
GM_path: $priors_path/avg152T1_gray_bin.nii.gz
# Full path to a binarized CSF prior probability map.
# It is not necessary to change this path unless you intend to use non-standard priors.
CSF_path: $priors_path/avg152T1_csf_bin.nii.gz
nuisance_corrections:
1-ICA-AROMA:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [Off]
# Types of denoising strategy:
# nonaggr: nonaggressive-partial component regression
# aggr: aggressive denoising
denoising_type: nonaggr
2-nuisance_regression:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# this is not a fork point
# Run nuisance regression in native or template space
# - If set to template, will use the brain mask configured in
# ``functional_preproc: func_masking: FSL_AFNI: brain_mask``
# - If ``registration_workflows: functional_registration: func_registration_to_template: apply_transform: using: single_step_resampling_from_stc``, this must be set to template
space: native
# switch to Off if nuisance regression is off and you don't want to write out the regressors
create_regressors: On
# Select which nuisance signal corrections to apply
Regressors:
- Name: 'filt_global'
Motion:
include_delayed: true
include_squared: true
include_delayed_squared: true
aCompCor:
summary:
method: DetrendPC
components: 5
tissues:
- WhiteMatter
- CerebrospinalFluid
extraction_resolution: 2
CerebrospinalFluid:
summary: Mean
extraction_resolution: 2
erode_mask: true
GlobalSignal:
summary: Mean
PolyOrt:
degree: 2
Bandpass:
bottom_frequency: 0.01
top_frequency: 0.1
method: default
# Standard Lateral Ventricles Binary Mask
# used in CSF mask refinement for CSF signal-related regressions
lateral_ventricles_mask: $FSLDIR/data/atlases/HarvardOxford/HarvardOxford-lateral-ventricles-thr25-2mm.nii.gz
# Whether to run frequency filtering before or after nuisance regression.
# Options: 'After' or 'Before'
bandpass_filtering_order: 'After'
# Process and refine masks used to produce regressors and time series for
# regression.
regressor_masks:
erode_anatomical_brain_mask:
# Erode binarized anatomical brain mask. If choosing True, please also set regressor_masks['erode_csf']['run']: True; anatomical_preproc['brain_extraction']['using']: niworkflows-ants.
run: Off
# Target volume ratio, if using erosion.
# Default proportion is None for anatomical brain mask.
# If using erosion, using both proportion and millimeters is not recommended.
brain_mask_erosion_prop:
# Erode brain mask in millimeters, default for brain mask is 30 mm
# Brain erosion default is using millimeters.
brain_mask_erosion_mm: 30
# Erode binarized brain mask in millimeter
brain_erosion_mm:
erode_csf:
# Erode binarized csf tissue mask.
run: Off
# Target volume ratio, if using erosion.
# Default proportion is None for cerebrospinal fluid mask.
# If using erosion, using both proportion and millimeters is not recommended.
csf_erosion_prop:
# Erode cerebrospinal fluid mask in millimeters, default for cerebrospinal fluid is 30mm
# Cerebrospinal fluid erosion default is using millimeters.
csf_mask_erosion_mm: 30
# Erode binarized cerebrospinal fluid mask in millimeter
csf_erosion_mm:
erode_wm:
# Erode WM binarized tissue mask.
run: Off
# Target volume ratio, if using erosion.
# Default proportion is 0.6 for white matter mask.
# If using erosion, using both proportion and millimeters is not recommended.
# White matter erosion default is using proportion erosion method when use erosion for white matter.
wm_erosion_prop: 0.6
# Erode white matter mask in millimeters, default for white matter is None
wm_mask_erosion_mm:
# Erode binarized white matter mask in millimeters
wm_erosion_mm:
erode_gm:
# Erode gray matter binarized tissue mask.
run: Off
# Target volume ratio, if using erosion.
# If using erosion, using both proportion and millimeters is not recommended.
gm_erosion_prop: 0.6
# Erode gray matter mask in millimeters
gm_mask_erosion_mm:
# Erode binarized gray matter mask in millimeters
gm_erosion_mm:
post_processing:
z-scoring:
run: Off
registration_workflows:
anatomical_registration:
run: On
resolution_for_anat: 2mm
T1w_brain_template: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain.nii.gz
T1w_template: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}.nii.gz
T1w_brain_template_mask: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain_mask.nii.gz
reg_with_skull: True
registration:
using: ['ANTS']
FSL-FNIRT:
fnirt_config: T1_2_MNI152_2mm
ref_mask: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain_mask_symmetric_dil.nii.gz
functional_registration:
coregistration:
# functional (BOLD/EPI) registration to anatomical (structural/T1)
run: On
func_input_prep:
# Choose whether to use functional brain or skull as the input to functional-to-anatomical registration
reg_with_skull: Off
# Choose whether to use the mean of the functional/EPI as the input to functional-to-anatomical registration or one of the volumes from the functional 4D timeseries that you choose.
# input: ['Mean_Functional', 'Selected_Functional_Volume', 'fmriprep_reference']
input: ['Mean_Functional']
Selected Functional Volume:
# Only for when 'Use as Functional-to-Anatomical Registration Input' is set to 'Selected Functional Volume'.
# Input the index of which volume from the functional 4D timeseries input file you wish to use as the input for functional-to-anatomical registration.
func_reg_input_volume: 0
boundary_based_registration:
# this is a fork point
# run: [On, Off] - this will run both and fork the pipeline
run: [On]
# Standard FSL 5.0 Scheduler used for Boundary Based Registration.
# It is not necessary to change this path unless you intend to use non-standard MNI registration.
bbr_schedule: $FSLDIR/etc/flirtsch/bbr.sch
func_registration_to_template:
# these options modify the application (to the functional data), not the calculation, of the
# T1-to-template and EPI-to-template transforms calculated earlier during registration
# apply the functional-to-template (T1 template) registration transform to the functional data
run: On
output_resolution:
# The resolution (in mm) to which the preprocessed, registered functional timeseries outputs are written into.
# NOTE:
# selecting a 1 mm or 2 mm resolution might substantially increase your RAM needs- these resolutions should be selected with caution.
# for most cases, 3 mm or 4 mm resolutions are suggested.
# NOTE:
# this also includes the single-volume 3D preprocessed functional data,
# such as the mean functional (mean EPI) in template space
func_preproc_outputs: 3mm
# The resolution (in mm) to which the registered derivative outputs are written into.
# NOTE:
# this is for the single-volume functional-space outputs (i.e. derivatives)
# thus, a higher resolution may not result in a large increase in RAM needs as above
func_derivative_outputs: 3mm
target_template:
# choose which template space to transform derivatives towards
# using: ['T1_template', 'EPI_template']
# this is a fork point
# NOTE:
# this will determine which registration transform to use to warp the functional
# outputs and derivatives to template space
using: ['T1_template']
T1_template:
# Standard Skull Stripped Template. Used as a reference image for functional registration.
# This can be different than the template used as the reference/fixed for T1-to-template registration.
T1w_brain_template_funcreg: $FSLDIR/data/standard/MNI152_T1_3mm_brain.nii.gz
# Standard Anatomical Brain Image with Skull.
# This can be different than the template used as the reference/fixed for T1-to-template registration.
T1w_template_funcreg: $FSLDIR/data/standard/MNI152_T1_3mm.nii.gz
# Template to be used during registration.
# It is not necessary to change this path unless you intend to use a non-standard template.
T1w_brain_template_mask_funcreg: $FSLDIR/data/standard/MNI152_T1_3mm_brain_mask.nii.gz
# a standard template for resampling if using float resolution
T1w_template_for_resample: $FSLDIR/data/standard/MNI152_T1_1mm_brain.nii.gz
FNIRT_pipelines:
# Interpolation method for writing out transformed functional images.
# Possible values: trilinear, sinc, spline
interpolation: sinc
# Identity matrix used during FSL-based resampling of functional-space data throughout the pipeline.
# It is not necessary to change this path unless you intend to use a different template.
identity_matrix: $FSLDIR/etc/flirtsch/ident.mat
timeseries_extraction:
run: On
tse_roi_paths:
/cpac_templates/CC400.nii.gz: Avg
/cpac_templates/CC200.nii.gz: Avg
/cpac_templates/aal_mask_pad.nii.gz: Avg
realignment: 'ROI_to_func'
connectivity_matrix:
# Create a connectivity matrix from timeseries data
# Options:
# ['AFNI', 'Nilearn', 'ndmg']
using:
- Nilearn
measure:
- Pearson
# PACKAGE INTEGRATIONS
# --------------------
PyPEER:
# Training of eye-estimation models. Commonly used for movies data/naturalistic viewing.
run: Off
# PEER scan names to use for training
# Example: ['peer_run-1', 'peer_run-2']
eye_scan_names: []
# Naturalistic viewing data scan names to use for eye estimation
# Example: ['movieDM']
data_scan_names: []
# Template-space eye mask
eye_mask_path: $FSLDIR/data/standard/MNI152_T1_${func_resolution}_eye_mask.nii.gz
# PyPEER Stimulus File Path
# This is a file describing the stimulus locations from the calibration sequence.
stimulus_path: None
minimal_nuisance_correction:
# PyPEER Minimal nuisance regression
# Note: PyPEER employs minimal preprocessing - these choices do not reflect what runs in the main pipeline.
# PyPEER uses non-nuisance-regressed data from the main pipeline.
# Global signal regression (PyPEER only)
peer_gsr: True
# Motion scrubbing (PyPEER only)
peer_scrub: False
# Motion scrubbing threshold (PyPEER only)
scrub_thresh: 0.2
Hi @ichkifa,
Thank you! Can I ask you to clarify whether you were able to replicate the preprocessing from ABIDE I but haven't been able to replicate it for ABIDE II, or whether you're comparing ABIDE II outputs to ABIDE I outputs?
Looking at the ABIDE II documentation, it appears that there are a handful of subjects that have data in common with ABIDE I, but not all of them. Since the timeseries are therefore coming from different participants, and may also have been produced using a range of C-PAC versions and pipeline configs, they won’t be the same when comparing ABIDE I/II.
Hello @tamsinrogers,
Thank you for your message!
I am currently working on reproducing the ABIDE I time series from the raw fMRI images, using C-PAC with the same configuration that was used for the ABIDE Preprocessed release.
The goal is to validate my configuration by comparing the time series I obtain with those already published for ABIDE I. This will ensure that my configuration is correct before using it to process the ABIDE II data.
Best regards,
Hi @ichkifa,
Thanks for the clarification! Our team is reproducing on our end and will get back to you shortly.