diff --git a/deepMask/inference.py b/deepMask/inference.py index 8082f0b..8d8996e 100755 --- a/deepMask/inference.py +++ b/deepMask/inference.py @@ -57,7 +57,7 @@ def main(): id=sys.argv[1], t1=args.t1, t2=args.t2, - output_suffix="_brain_final.nii.gz", + output_suffix="_brain.nii.gz", output_dir=args.outdir, template=template, usen3=True, diff --git a/deepMask/utils/bids_metadata.py b/deepMask/utils/bids_metadata.py new file mode 100644 index 0000000..2310e9e --- /dev/null +++ b/deepMask/utils/bids_metadata.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python3 +""" +BIDS metadata generation utilities for deepFCD pipeline. +""" + +import datetime +import json +import os +from typing import Dict, List, Optional, Tuple + + +def get_image_metadata(image_path: str) -> Dict: + """Extract basic metadata from image file path. + + Args: + image_path: Path to the NIfTI image file + + Returns: + Dictionary containing basic image metadata + """ + try: + # For now, return basic metadata that doesn't require nibabel + # This can be enhanced later when nibabel is available + metadata = { + "ImageType": ["DERIVED", "PRIMARY"], + "ProcessingNote": "Metadata extracted from file path only. Install nibabel for detailed image properties.", + "Extension": os.path.splitext(image_path)[1], + } + + return metadata + + except Exception as e: + print(f"Warning: Could not extract metadata from {image_path}: {e}") + return {} + + +def generate_preprocessing_metadata( + subject_id: str, + session_id: Optional[str], + modality: str, + output_path: str, + original_files: List[str], + processing_steps: List[str], + software_versions: Dict[str, str] = None, + space: str = "MNI152NLin2009aSym", +) -> Dict: + """Generate BIDS-compliant metadata for preprocessed files. 
+ + Args: + subject_id: Subject identifier (e.g., 'sub-001') + session_id: Session identifier (e.g., 'ses-01') or None + modality: Image modality ('T1w' or 'FLAIR') + output_path: Path to the output preprocessed file + original_files: List of original input files used + processing_steps: List of processing steps applied + software_versions: Dictionary of software versions used + space: Coordinate space of the output image + + Returns: + Dictionary containing BIDS metadata + """ + if software_versions is None: + software_versions = { + "deepMask": "latest", + "ANTs": "2.3.3+", + "N3BiasFieldCorrection": "ITK-based", + } + + # Get current timestamp + timestamp = datetime.datetime.now().isoformat() + + # Extract image-specific metadata if file exists + image_metadata = {} + if os.path.exists(output_path): + image_metadata = get_image_metadata(output_path) + + # Generate base metadata + metadata = { + "Description": f"Brain-extracted and preprocessed {modality} anatomical image", + "Units": "arbitrary", + "SpatialReference": space, + "SkullStripped": True, + "ProcessingSteps": processing_steps, + "SoftwareVersions": software_versions, + "IntendedFor": "deepFCD focal cortical dysplasia detection", + "Sources": original_files, + "GeneratedBy": [ + { + "Name": "deepFCD-preproc", + "Version": "1.0.0", + "Description": "Preprocessing pipeline for deepFCD", + "CodeURL": "https://github.com/NOEL-MNI/deepFCD", + } + ], + "ProcessingTimestamp": timestamp, + } + + # Add image-specific metadata + metadata.update(image_metadata) + + # Add modality-specific information + if modality == "T1w": + metadata["SequenceType"] = "T1-weighted" + metadata["ContrastType"] = "T1w" + elif modality == "FLAIR": + metadata["SequenceType"] = "T2-FLAIR" + metadata["ContrastType"] = "FLAIR" + metadata["InversionTime"] = None # Would need to be extracted from DICOM + + return metadata + + +def write_json_sidecar(metadata: Dict, json_path: str) -> None: + """Write metadata to a JSON sidecar file. 
+ + Args: + metadata: Dictionary containing metadata + json_path: Path where JSON file should be written + """ + try: + os.makedirs(os.path.dirname(json_path), exist_ok=True) + + with open(json_path, "w") as f: + json.dump(metadata, f, indent=2, ensure_ascii=False) + + print(f"Generated BIDS metadata: {json_path}") + + except Exception as e: + print(f"Error writing JSON sidecar {json_path}: {e}") + + +def generate_dataset_description( + output_dir: str, + pipeline_name: str = "deepFCD-preproc", + description: str = "Preprocessing pipeline for deepFCD: brain extraction, registration, and bias correction", +) -> None: + """Generate or update dataset_description.json for derivatives. + + Args: + output_dir: Root directory of the derivatives dataset + pipeline_name: Name of the pipeline + description: Description of the pipeline + """ + dataset_desc_path = os.path.join(output_dir, "dataset_description.json") + + dataset_description = { + "Name": pipeline_name, + "BIDSVersion": "1.7.0", + "DatasetType": "derivative", + "GeneratedBy": [ + { + "Name": pipeline_name, + "Version": "1.0.0", + "Description": description, + "CodeURL": "https://github.com/NOEL-MNI/deepFCD", + } + ], + "HowToAcknowledge": "Please cite the deepFCD paper when using this software.", + "PipelineDescription": { + "Name": pipeline_name, + "Version": "1.0.0", + "Description": description, + }, + "SourceDatasets": [ + { + "Description": "Raw BIDS dataset containing T1w and FLAIR anatomical images" + } + ], + "Sources": [ + { + "Suffix": "T1w", + "Description": "T1-weighted anatomical images", + "Units": "arbitrary", + }, + { + "Suffix": "FLAIR", + "Description": "T2-FLAIR anatomical images", + "Units": "arbitrary", + }, + ], + } + + # Write dataset description if it doesn't exist + if not os.path.exists(dataset_desc_path): + write_json_sidecar(dataset_description, dataset_desc_path) + + +def generate_bids_metadata_for_outputs( + subject_id: str, + session_id: Optional[str], + output_dir: str, + 
original_t1_file: str, + original_t2_file: str, + processing_steps: List[str], + space: str = "MNI152NLin2009aSym", +) -> None: + """Generate BIDS metadata for both T1 and T2 final output files. + + Args: + subject_id: Subject identifier (e.g., 'sub-001') + session_id: Session identifier (e.g., 'ses-01') or None + output_dir: Directory containing the output files + original_t1_file: Path to original T1w file + original_t2_file: Path to original T2/FLAIR file + processing_steps: List of processing steps applied + space: Coordinate space of output images + """ + # Construct the full subject/session ID + if session_id: + fullid = f"{subject_id}_{session_id}" + else: + fullid = subject_id + + # Define output file paths + t1_output = os.path.join(output_dir, f"{fullid}_space-{space}_T1w_brain.nii.gz") + t2_output = os.path.join(output_dir, f"{fullid}_space-{space}_FLAIR_brain.nii.gz") + + # Define JSON sidecar paths + t1_json = os.path.join(output_dir, f"{fullid}_space-{space}_T1w_brain.json") + t2_json = os.path.join(output_dir, f"{fullid}_space-{space}_FLAIR_brain.json") + + # Software versions + software_versions = { + "deepMask": "latest", + "ANTs": "2.3.3+", + "N3BiasFieldCorrection": "ITK-based", + "Python": "3.8+", + "ANTsPy": "0.4.2+", + } + + # Generate T1w metadata + if os.path.exists(t1_output): + t1_metadata = generate_preprocessing_metadata( + subject_id=subject_id, + session_id=session_id, + modality="T1w", + output_path=t1_output, + original_files=[original_t1_file], + processing_steps=processing_steps, + software_versions=software_versions, + space=space, + ) + write_json_sidecar(t1_metadata, t1_json) + + # Generate T2/FLAIR metadata + if os.path.exists(t2_output): + t2_metadata = generate_preprocessing_metadata( + subject_id=subject_id, + session_id=session_id, + modality="FLAIR", + output_path=t2_output, + original_files=[original_t2_file], + processing_steps=processing_steps, + software_versions=software_versions, + space=space, + ) + 
write_json_sidecar(t2_metadata, t2_json) + + # Ensure dataset description exists + # Go up the directory tree to find the derivatives root + derivatives_root = output_dir + while not os.path.basename(derivatives_root).startswith("deepFCD"): + parent = os.path.dirname(derivatives_root) + if parent == derivatives_root: # Reached filesystem root + break + derivatives_root = parent + + generate_dataset_description(derivatives_root) + + +def parse_subject_session(fullid: str) -> Tuple[str, Optional[str]]: + """Parse full subject ID into subject and session components. + + Args: + fullid: Full ID like 'sub-001' or 'sub-001_ses-01' + + Returns: + Tuple of (subject_id, session_id) + """ + if "_ses-" in fullid: + subject_id, session_part = fullid.split("_ses-", 1) + session_id = f"ses-{session_part}" + return subject_id, session_id + else: + return fullid, None + + +def generate_inference_metadata( + subject_id: str, + session_id: Optional[str], + modality: str, + output_path: str, + processing_description: str, + space: str = "MNI152NLin2009aSym", +) -> Dict: + """Generate BIDS-compliant metadata for inference output files. 
+ + Args: + subject_id: Subject identifier (e.g., 'sub-001') + session_id: Session identifier (e.g., 'ses-01') or None + modality: Type of output ('probseg-mean' or 'probseg-var') + output_path: Path to the output file + processing_description: Description of the processing + space: Coordinate space of the output image + + Returns: + Dictionary containing BIDS metadata + """ + # Get current timestamp + timestamp = datetime.datetime.now().isoformat() + + # Extract image-specific metadata if file exists + image_metadata = {} + if os.path.exists(output_path): + image_metadata = get_image_metadata(output_path) + + # Generate base metadata + metadata = { + "Description": processing_description, + "Units": "probability" if "probseg" in modality else "arbitrary", + "SpatialReference": space, + "GeneratedBy": [ + { + "Name": "deepFCD", + "Version": "1.0.0", + "Description": "Deep learning-based Focal Cortical Dysplasia detection", + "CodeURL": "https://github.com/NOEL-MNI/deepFCD", + } + ], + "ProcessingTimestamp": timestamp, + "RawSources": [], # Will be populated with preprocessed input files + } + + # Add image-specific metadata + metadata.update(image_metadata) + + # Add modality-specific information + if "probseg-mean" in modality: + metadata["StatisticalMap"] = "mean" + metadata["TaskName"] = "FCD detection" + metadata["ContrastDefinition"] = ( + "Probability of focal cortical dysplasia presence" + ) + metadata["IntendedFor"] = ( + "Statistical analysis and visualization of FCD detection results" + ) + elif "probseg-var" in modality: + metadata["StatisticalMap"] = "variance" + metadata["TaskName"] = "FCD detection uncertainty" + metadata["ContrastDefinition"] = ( + "Uncertainty/variance in FCD detection probability" + ) + metadata["IntendedFor"] = "Uncertainty quantification for FCD detection results" + + return metadata + + +def generate_inference_bids_metadata(options, uncertainty=True): + """Generate BIDS metadata for inference output files. 
+ + Args: + options: Dictionary containing inference options including file paths + uncertainty: Whether uncertainty files were generated + """ + try: + if ( + generate_bids_metadata_for_inference_outputs is not None + and parse_subject_session is not None + ): + # Extract subject information from fullid + fullid = options.get("fullid", "") + if not fullid: + print( + "Warning: fullid not found in options, skipping BIDS metadata generation" + ) + return + + subject_id, session_id = parse_subject_session(fullid) + + # Get file paths + mean_file = options.get("test_mean_name", "") + var_file = options.get("test_var_name", "") if uncertainty else "" + + if not mean_file: + print( + "Warning: test_mean_name not found in options, skipping BIDS metadata generation" + ) + return + + # Get output directory from file path + output_dir = os.path.dirname(mean_file) + + # Determine preprocessed sources (these would be passed in the options ideally) + preprocessed_sources = [] + if "orig_files" in options: + preprocessed_sources = options["orig_files"] + + # Generate metadata + generate_bids_metadata_for_inference_outputs( + subject_id=subject_id, + session_id=session_id, + output_dir=output_dir, + mean_file_path=mean_file, + var_file_path=var_file, + preprocessed_sources=preprocessed_sources, + space="MNI152NLin2009aSym", # Assuming MNI space for inference outputs + ) + + print(f"Generated BIDS metadata for inference outputs: {fullid}") + + else: + print( + "Warning: BIDS metadata utilities not available for inference - skipping metadata generation" + ) + + except Exception as e: + print(f"Warning: Error generating BIDS metadata for inference: {e}") + + +def generate_bids_metadata_for_inference_outputs( + subject_id: str, + session_id: Optional[str], + output_dir: str, + mean_file_path: str, + var_file_path: str, + preprocessed_sources: List[str], + space: str = "MNI152NLin2009aSym", +) -> None: + """Generate BIDS metadata for inference output files. 
+ + Args: + subject_id: Subject identifier (e.g., 'sub-001') + session_id: Session identifier (e.g., 'ses-01') or None + output_dir: Directory containing the output files + mean_file_path: Path to the mean probability segmentation file + var_file_path: Path to the variance probability segmentation file + preprocessed_sources: List of preprocessed input files used + space: Coordinate space of output images + """ + # Generate metadata for mean probability segmentation + if os.path.exists(mean_file_path): + mean_metadata = generate_inference_metadata( + subject_id=subject_id, + session_id=session_id, + modality="probseg-mean", + output_path=mean_file_path, + processing_description="Mean probability map for focal cortical dysplasia detection using deep learning", + space=space, + ) + mean_metadata["RawSources"] = preprocessed_sources + + mean_json_path = mean_file_path.replace(".nii.gz", ".json").replace( + ".nii", ".json" + ) + write_json_sidecar(mean_metadata, mean_json_path) + + # Generate metadata for variance probability segmentation + if os.path.exists(var_file_path): + var_metadata = generate_inference_metadata( + subject_id=subject_id, + session_id=session_id, + modality="probseg-var", + output_path=var_file_path, + processing_description="Variance/uncertainty map for focal cortical dysplasia detection using deep learning", + space=space, + ) + var_metadata["RawSources"] = preprocessed_sources + + var_json_path = var_file_path.replace(".nii.gz", ".json").replace( + ".nii", ".json" + ) + write_json_sidecar(var_metadata, var_json_path) + + # Ensure dataset description exists in the derivatives root + derivatives_root = output_dir + while not os.path.basename(derivatives_root).startswith("deepFCD"): + parent = os.path.dirname(derivatives_root) + if parent == derivatives_root: # Reached filesystem root + break + derivatives_root = parent + + generate_dataset_description( + derivatives_root, + pipeline_name="deepFCD", + description="Deep learning-based Focal 
Cortical Dysplasia detection", + ) diff --git a/deepMask/utils/deepmask.py b/deepMask/utils/deepmask.py index eb53dd6..413af1c 100644 --- a/deepMask/utils/deepmask.py +++ b/deepMask/utils/deepmask.py @@ -1,22 +1,39 @@ import fileinput -import nibabel as nib -import numpy as np import os import re import subprocess import time -import torch +import nibabel as nib +import numpy as np +import torch from nibabel import load as load_nii from skimage import transform as skt from sklearn.utils import class_weight from torch.autograd import Variable -def deepMask(args, model, id, t1w_np, t2w_np, t1w_fname, t2w_fname, nifti=True): +def deepMask( + args, + model, + id, + t1w_np, + t2w_np, + t1w_fname, + t2w_fname, + nifti=True, + return_paths=False, +): dst = args.outdir case_id = id + # probablity segmentation and discrete segmentation output filenames + probseg = case_id + "_space-MNI152_desc-deepMask_probseg.nii.gz" + probseg_path = os.path.join(dst, probseg) + + dseg = case_id + "_space-MNI152_desc-deepMask_dseg.nii.gz" + dseg_path = os.path.join(dst, dseg) + model.eval() start_time = time.time() @@ -37,7 +54,7 @@ def deepMask(args, model, id, t1w_np, t2w_np, t1w_fname, t2w_fname, nifti=True): output = output.cpu() output = output.data.numpy() - print("save {}".format(case_id)) + print(f"save {case_id}") if not os.path.exists(os.path.join(dst)): os.makedirs(os.path.join(dst), exist_ok=True) @@ -52,11 +69,11 @@ def deepMask(args, model, id, t1w_np, t2w_np, t1w_fname, t2w_fname, nifti=True): anti_aliasing=True, ) nii_out = nib.Nifti1Image(output, affine, header) - nii_out.to_filename(os.path.join(dst, case_id + "_vnet_maskpred.nii.gz")) + nii_out.to_filename(probseg_path) elapsed_time = time.time() - start_time print("=" * 70) - print("=> inference time: {} seconds".format(round(elapsed_time, 2))) + print(f"=> inference time: {round(elapsed_time, 2)} seconds") print("=" * 70) # config = './utils/dense3dCrf/config_densecrf.txt' @@ -71,16 +88,18 @@ def deepMask(args, model, 
id, t1w_np, t2w_np, t1w_fname, t2w_fname, nifti=True): out_shape, config, dst, - os.path.join(dst, case_id + "_vnet_maskpred.nii.gz"), + os.path.join(probseg_path), ) elapsed_time = time.time() - start_time print("=" * 70) - print("=> dense 3D-CRF inference time: {} seconds".format(round(elapsed_time, 2))) + print(f"=> dense 3D-CRF inference time: {round(elapsed_time, 2)} seconds") print("=" * 70) - fname = os.path.join(dst, case_id + "_denseCrf3dSegmMap.nii.gz") - seg_map = load_nii(fname).get_fdata() - return seg_map + seg_map = load_nii(dseg_path).get_fdata() + if return_paths: + return seg_map, (probseg_path, dseg_path) + else: + return seg_map def normalize_resize_to_tensor(t1w_np, t2w_np, args): @@ -134,8 +153,8 @@ def denseCRF(id, t1, t2, input_shape, config, out_dir, pred_labels): def datestr(): now = time.gmtime() - return "{}{:02}{:02}_{:02}{:02}".format( - now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min + return ( + f"{now.tm_year}{now.tm_mon:02}{now.tm_mday:02}_{now.tm_hour:02}{now.tm_min:02}" ) @@ -159,7 +178,6 @@ def compute_weights(labels, binary=False): def dice_gross(image, label, empty_score=1.0): - image = (image > 0).astype(np.int_) label = (label > 0).astype(np.int_) @@ -168,9 +186,7 @@ def dice_gross(image, label, empty_score=1.0): if image.shape != label.shape: raise ValueError( - "Shape mismatch: image {0} and label {1} must have the same shape.".format( - image.shape, label.shape - ) + f"Shape mismatch: image {image.shape} and label {label.shape} must have the same shape." ) im_sum = image.sum() + label.sum() diff --git a/deepMask/utils/dense3dCrf/config_densecrf.txt b/deepMask/utils/dense3dCrf/config_densecrf.txt index 7af0aea..6bcbb38 100644 --- a/deepMask/utils/dense3dCrf/config_densecrf.txt +++ b/deepMask/utils/dense3dCrf/config_densecrf.txt @@ -95,7 +95,7 @@ # prefixForOutputSegmentationMap: Essentially the filename for the resulting segmentation map (default is denseCrf3dOutputSegm). 
Will be saved as a .nii.gz automatically. 
 -prefixForOutputSegmentationMap
-_denseCrf3dSegmMap
+_space-MNI152_desc-deepMask_dseg
 
 # prefixForOutputProbabilityMaps: Prefix of the filenames with which to save the resulting probability maps (default is denseCrf3dProbMapClass).
 # Each probability map will be saved as "prefix" + numberOfClass + ".nii.gz" automatically.
diff --git a/deepMask/utils/dense3dCrf/config_densecrf_t1.txt b/deepMask/utils/dense3dCrf/config_densecrf_t1.txt
index 9cbf8ea..74885c5 100644
--- a/deepMask/utils/dense3dCrf/config_densecrf_t1.txt
+++ b/deepMask/utils/dense3dCrf/config_densecrf_t1.txt
@@ -94,7 +94,7 @@
 # prefixForOutputSegmentationMap: Essentially the filename for the resulting segmentation map (default is denseCrf3dOutputSegm). Will be saved as a .nii.gz automatically. 
 -prefixForOutputSegmentationMap
-_denseCrf3dSegmMap
+_space-MNI152_desc-deepMask_dseg
 
 # prefixForOutputProbabilityMaps: Prefix of the filenames with which to save the resulting probability maps (default is denseCrf3dProbMapClass).
 # Each probability map will be saved as "prefix" + numberOfClass + ".nii.gz" automatically.
diff --git a/deepMask/utils/helpers.py b/deepMask/utils/helpers.py index 4b24907..e91bf9c 100644 --- a/deepMask/utils/helpers.py +++ b/deepMask/utils/helpers.py @@ -1,4 +1,7 @@ -import os, random, string +import os +import random +import string + from ants import apply_ants_transform_to_image, image_write, read_transform diff --git a/deepMask/utils/image_processing.py b/deepMask/utils/image_processing.py index cf0e2fb..a65d392 100644 --- a/deepMask/utils/image_processing.py +++ b/deepMask/utils/image_processing.py @@ -1,18 +1,47 @@ -import ants -import os import logging +import multiprocessing +import os import time + +import ants + # import matplotlib as mpl # mpl.use("Qt5Agg") import matplotlib.pyplot as plt -import multiprocessing import numpy as np + # import zipfile if os.environ.get("BRAIN_MASKING") == "cpu": from antspynet.utilities import brain_extraction -from .deepmask import * -from .helpers import * from matplotlib.backends.backend_pdf import PdfPages + +from .deepmask import deepMask +from .helpers import apply_transform, random_case_id + +# Import BIDS metadata utilities (try relative, package, and legacy locations) +try: + from ...utils.bids_metadata import ( + generate_bids_metadata_for_outputs, + parse_subject_session, + ) +except Exception: + try: + from utils.bids_metadata import ( + generate_bids_metadata_for_outputs, + parse_subject_session, + ) + except Exception: + try: + from bids_metadata import ( + generate_bids_metadata_for_outputs, + parse_subject_session, + ) + except Exception: + print( + "Warning: Could not import BIDS metadata utilities. Metadata generation will be skipped." + ) + generate_bids_metadata_for_outputs = None + parse_subject_session = None # read pngs to save as pdf from PIL import Image @@ -25,7 +54,7 @@ try: os.remove(logfile) except OSError as e: - print("Error: %s - %s." 
% (e.filename, e.strerror)) + print(f"Error: {e.filename} - {e.strerror}.") handler = logging.FileHandler(logfile) handler.setLevel(logging.INFO) @@ -47,7 +76,7 @@ def __init__( id, t1=None, t2=None, - output_suffix="_brain_final.nii.gz", + output_suffix="_brain.nii.gz", output_dir=None, template=None, transform="Affine", @@ -57,7 +86,7 @@ def __init__( QC=None, preprocess=True, ): - super(noelImageProcessor, self).__init__() + super().__init__() self._id = id self._t1file = t1 self._t2file = t2 @@ -77,11 +106,11 @@ def __load_nifti_file(self): logger.info("loading nifti files") print("loading nifti files") self._mni = self._template - if self._t1file == None and self._t2file == None: - logger.warn("Please load the data first", "The data is invalid/missing") + if self._t1file is None and self._t2file is None: + logger.warning("Please load the data first", "The data is invalid/missing") return - if self._t1file != None and self._t2file != None: + if self._t1file is not None and self._t2file is not None: self._t1 = ants.image_read(self._t1file) self._t2 = ants.image_read(self._t2file) self._icbm152 = ants.image_read(self._mni) @@ -89,7 +118,7 @@ def __load_nifti_file(self): def __register_to_MNI_space(self): logger.info("registration to MNI template space") print("registration to MNI template space") - if self._t1file != None and self._t2file != None: + if self._t1file is not None and self._t2file is not None: self._t1_reg = ants.registration( fixed=self._icbm152, moving=self._t1, @@ -100,18 +129,33 @@ def __register_to_MNI_space(self): moving=self._t2, type_of_transform=self._transform, ) - # create directory to store transforms - xfmdir = os.path.join(self._outputdir, "transforms") + # create directory to store transforms in BIDS-compliant location + # transforms should be at session level in 'xfm' directory, not inside 'anat' + if "_ses-" in self._id: + # For sessions: go up from anat/ to session level and create xfm/ + session_dir = 
os.path.dirname(self._outputdir) # go up from anat/ + xfmdir = os.path.join(session_dir, "xfm") + else: + # For no sessions: go up from anat/ to subject level and create xfm/ + subject_dir = os.path.dirname(self._outputdir) # go up from anat/ + xfmdir = os.path.join(subject_dir, "xfm") + if not os.path.exists(xfmdir): os.makedirs(xfmdir) - # write forward transforms to xfmdir + # write forward transforms to xfmdir with BIDS BEP014 naming ants.write_transform( ants.read_transform(self._t1_reg["fwdtransforms"][0]), - os.path.join(xfmdir, self._id + "_t1-native-to-MNI152.mat"), + os.path.join( + xfmdir, + self._id + "_from-T1w_to-MNI152_mode-image_xfm.mat", + ), ) ants.write_transform( ants.read_transform(self._t2_reg["fwdtransforms"][0]), - os.path.join(xfmdir, self._id + "_t2-native-to-MNI152.mat"), + os.path.join( + xfmdir, + self._id + "_from-FLAIR_to-MNI152_mode-image_xfm.mat", + ), ) # self._t2_reg = ants.apply_transforms(fixed = self._t1_reg['warpedmovout'], moving = self._t2, transformlist = self._t1_reg['fwdtransforms']) # ants.image_write( self._t1_reg['warpedmovout'], self._t1regfile) @@ -122,7 +166,7 @@ def __bias_correction(self): "performing {} bias correction".format("N3" if self._usen3 else "N4") ) print("performing {} bias correction".format("N3" if self._usen3 else "N4")) - if self._t1file != None and self._t2file != None: + if self._t1file is not None and self._t2file is not None: if self._usen3: self._t1_n4 = ( ants.iMath( @@ -158,10 +202,10 @@ def __bias_correction(self): * 100 ) self._t1regfile = os.path.join( - self._outputdir, self._id + "_t1_final.nii.gz" + self._outputdir, self._id + "_space-MNI152_T1w.nii.gz" ) self._t2regfile = os.path.join( - self._outputdir, self._id + "_t2_final.nii.gz" + self._outputdir, self._id + "_space-MNI152_FLAIR.nii.gz" ) ants.image_write(self._t1_n4, self._t1regfile) ants.image_write(self._t2_n4, self._t2regfile) @@ -169,10 +213,10 @@ def __bias_correction(self): def __skull_stripping(self): # specify the output 
filenames for brain extracted images self._t1brainfile = os.path.join( - self._outputdir, self._id + "_t1" + self._outsuffix + self._outputdir, self._id + "_space-MNI152_T1w" + self._outsuffix ) self._t2brainfile = os.path.join( - self._outputdir, self._id + "_t2" + self._outsuffix + self._outputdir, self._id + "_space-MNI152_FLAIR" + self._outsuffix ) if os.environ.get("BRAIN_MASKING") == "cpu": logger.info("performing brain extraction using ANTsPyNet") @@ -215,7 +259,7 @@ def __skull_stripping(self): else: logger.info("performing brain extraction using deepMask") print("performing brain extraction using deepMask") - if self._t1file != None and self._t2file != None: + if self._t1file is not None and self._t2file is not None: if self._preprocess: mask = deepMask( self._args, @@ -227,8 +271,8 @@ def __skull_stripping(self): self._t2regfile, ) self._mask = self._t1_n4.new_image_like(mask) - ants.image_write(self._t1_n4 * self._mask, self._t1brainfile) - ants.image_write(self._t2_n4 * self._mask, self._t2brainfile) + self._t1_brain = self._t1_n4 * self._mask + self._t2_brain = self._t2_n4 * self._mask else: mask = deepMask( self._args, @@ -240,8 +284,77 @@ def __skull_stripping(self): self._t2file, ) self._mask = self._t1.new_image_like(mask) - ants.image_write(self._t1 * self._mask, self._t1brainfile) - ants.image_write(self._t2 * self._mask, self._t2brainfile) + self._t1_brain = self._t1 * self._mask + self._t2_brain = self._t2 * self._mask + ants.image_write(self._t1_brain, self._t1brainfile) + ants.image_write(self._t2_brain, self._t2brainfile) + + # Generate BIDS metadata for the output files + self.__generate_bids_metadata() + + def __generate_bids_metadata(self): + """Generate BIDS-compliant metadata for preprocessed output files.""" + logger.info("generating BIDS metadata for preprocessed outputs") + print("generating BIDS metadata for preprocessed outputs") + + try: + if ( + generate_bids_metadata_for_outputs is not None + and parse_subject_session is not 
None + ): + # Parse subject and session from ID + subject_id, session_id = parse_subject_session(self._id) + + # Determine processing steps based on what was actually done + processing_steps = [] + if self._preprocess: + processing_steps.extend( + [ + "Registration to MNI152 template space using ANTs", + "N3 bias field correction using ANTs", + ] + ) + + # Always include brain extraction since it's always performed + if os.environ.get("BRAIN_MASKING") == "cpu": + processing_steps.append("Brain extraction using ANTsPyNet") + else: + processing_steps.append( + "Brain extraction using deepMask neural network" + ) + + if self._preprocess: + processing_steps.append("Intensity normalization") + else: + processing_steps.append( + "Intensity normalization (min-max scaling to 0-100)" + ) + + # Generate metadata for both output files + generate_bids_metadata_for_outputs( + subject_id=subject_id, + session_id=session_id, + output_dir=self._outputdir, + original_t1_file=self._t1file, + original_t2_file=self._t2file, + processing_steps=processing_steps, + space="MNI152" if self._preprocess else "native", + ) + + logger.info("BIDS metadata generation completed successfully") + print("BIDS metadata generation completed successfully") + + else: + logger.warning( + "BIDS metadata utilities not available - skipping metadata generation" + ) + print( + "Warning: BIDS metadata utilities not available - skipping metadata generation" + ) + + except Exception as e: + logger.warning(f"Error generating BIDS metadata: {e}") + print(f"Warning: Error generating BIDS metadata: {e}") def __apply_transforms(self): logger.info( @@ -249,19 +362,18 @@ def __apply_transforms(self): ) print("apply transforms to project outputs back to the native input space") self._t1_native = apply_transform( - self._mask, self._t1, self._t1_reg["fwdtransforms"][0], invert_xfrm=True + self._t1_brain, self._t1, self._t1_reg["fwdtransforms"][0], invert_xfrm=True ) self._t2_native = apply_transform( - self._mask, 
self._t2, self._t2_reg["fwdtransforms"][0], invert_xfrm=True + self._t2_brain, self._t2, self._t2_reg["fwdtransforms"][0], invert_xfrm=True ) - mask_suffix = "_brain_mask_native.nii.gz" - # write skull-stripped versions of the brain mask in native space + # write skull-stripped versions of the brain in native space ants.image_write( - self._t1_native, self._t1brainfile.replace(self._outsuffix, mask_suffix) + self._t1_native, self._t1brainfile.replace("MNI152", "orig") ) ants.image_write( - self._t2_native, self._t2brainfile.replace(self._outsuffix, mask_suffix) + self._t2_native, self._t2brainfile.replace("MNI152", "orig") ) def __generate_QC_maps(self): @@ -269,7 +381,7 @@ def __generate_QC_maps(self): qcdir = os.path.join(self._args.tmpdir, "qc") if not os.path.exists(qcdir): os.makedirs(qcdir) - if self._t1file != None and self._t2file != None: + if self._t1file is not None and self._t2file is not None: self._icbm152.plot( overlay=self._t1, overlay_alpha=0.5, @@ -383,28 +495,9 @@ def __organize_and_cleanup(self): _move_suffix = { "_denseCrf3dProbMapClass1.nii.gz", "_denseCrf3dProbMapClass0.nii.gz", - "_vnet_maskpred.nii.gz", } - _rename_suffix = "_denseCrf3dSegmMap.nii.gz" - # _final_suffix = "_final.nii.gz" - _native_suffix = "_native.nii.gz" for file in os.listdir(self._outputdir): - if file.endswith(_rename_suffix): - src = os.path.join(self._outputdir, file) - dst = os.path.join( - self._outputdir, - file.replace(_rename_suffix, "_brain_mask_final.nii.gz"), - ) - os.renames(src, dst) - if file.endswith(_native_suffix): - src = os.path.join(self._outputdir, file) - dst = os.path.join(self._outputdir, "native", file) - os.renames(src, dst) - # if file.endswith(_final_suffix): - # src = os.path.join(self._outputdir, file) - # dst = os.path.join(self._outputdir, "final", file) - # os.renames(src, dst) for _suffix in _move_suffix: if file.endswith(_suffix): src = os.path.join(self._outputdir, file) @@ -451,14 +544,8 @@ def pipeline(self): 
self.__organize_and_cleanup()
 
         end = time.time()
-        print(
-            "pipeline processing time elapsed: {} seconds".format(
-                np.round(end - start, 1)
-            )
-        )
+        print(f"pipeline processing time elapsed: {np.round(end - start, 1)} seconds")
         logger.info(
-            "pipeline processing time elapsed: {} seconds".format(
-                np.round(end - start, 1)
-            )
+            f"pipeline processing time elapsed: {np.round(end - start, 1)} seconds"
         )
         logger.info("*********************************************")
diff --git a/tests/test_integration.py b/tests/test_integration.py
index c76725e..383659f 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -37,12 +37,32 @@
     def brain_mask_paths(self, test_config):
         """Paths to predicted and reference brain masks."""
         patient_id = test_config["patient_id"]
 
-        # deepMask output path (in subject subdirectory)
-        pred_mask = (
-            test_config["pred_dir"]
-            / patient_id
-            / f"{patient_id}_brain_mask_final.nii.gz"
-        )
+        # deepMask output directory for the subject
+        subject_outdir = test_config["pred_dir"] / patient_id
+
+        # Candidate predicted mask filenames (priority order)
+        candidates = [
+            f"{patient_id}_brain_mask_final.nii.gz",
+            f"{patient_id}_space-MNI152_label-brain_dseg.nii.gz",
+            f"{patient_id}_space-MNI152_desc-deepMask_dseg.nii.gz",
+            f"{patient_id}_space-MNI152_desc-deepMask_probseg.nii.gz",
+            f"{patient_id}_denseCrf3dSegmMap.nii.gz",
+        ]
+
+        pred_mask = None
+        if subject_outdir.exists():
+            for c in candidates:
+                p = subject_outdir / c
+                if p.exists():
+                    pred_mask = p
+                    break
+
+        # Fallback: look for any file with 'brain_mask' or 'desc-deepMask' in the name
+        if pred_mask is None and subject_outdir.exists():
+            for f in subject_outdir.iterdir():
+                if f.is_file() and ("brain_mask" in f.name or "desc-deepMask" in f.name or f.name.endswith("_dseg.nii.gz")):
+                    pred_mask = f
+                    break
 
         # Reference brain mask from deepFCD
         ref_mask = (
@@ -198,19 +218,30 @@
     def test_output_files_created(self, test_config):
         pred_dir = (
             test_config["pred_dir"] / patient_id
         )  # Output is
in subject subdirectory + # Logical outputs and candidate filename patterns produced by different deepMask versions + expected = { + "brain_mask": [ + f"{patient_id}_brain_mask_final.nii.gz", + f"{patient_id}_space-MNI152_desc-deepMask_dseg.nii.gz", + f"{patient_id}_space-MNI152_desc-deepMask_probseg.nii.gz", + f"{patient_id}_space-MNI152_desc-deepMask_dseg.nii.gz", + f"{patient_id}_space-MNI152_label-brain_dseg.nii.gz", + ], + "t1_brain": [f"{patient_id}_space-MNI152_T1w_brain.nii.gz", f"{patient_id}_t1_brain_final.nii.gz"], + "flair_brain": [f"{patient_id}_space-MNI152_FLAIR_brain.nii.gz", f"{patient_id}_t2_brain_final.nii.gz"], + } - expected_files = [ - f"{patient_id}_brain_mask_final.nii.gz", - f"{patient_id}_t1_brain_final.nii.gz", - f"{patient_id}_t2_brain_final.nii.gz", - ] - - for filename in expected_files: - filepath = pred_dir / filename - assert filepath.exists(), f"Expected output file not found: {filepath}" + for logical, candidates in expected.items(): + found = None + for filename in candidates: + filepath = pred_dir / filename + if filepath.exists(): + found = filepath + break + assert found is not None, f"Expected output for '{logical}' not found. 
Searched: {candidates} in {pred_dir}"
 
             # Check file is not empty
-            assert filepath.stat().st_size > 0, f"Output file is empty: {filepath}"
+            assert found.stat().st_size > 0, f"Output file is empty: {found}"
 
     def test_skull_stripped_images(self, test_config):
         """Test properties of skull-stripped images."""
@@ -218,9 +249,8 @@
         patient_id = test_config["patient_id"]
         pred_dir = (
             test_config["pred_dir"] / patient_id
         )  # Output is in subject subdirectory
-
         # Test T1 skull-stripped image
-        t1_stripped = pred_dir / f"{patient_id}_t1_brain_final.nii.gz"
+        t1_stripped = pred_dir / f"{patient_id}_space-MNI152_T1w_brain.nii.gz"
 
         if t1_stripped.exists():
             img = ants.image_read(str(t1_stripped))
@@ -233,7 +263,9 @@
         assert img_data.max() > img_data.min(), "No intensity variation"
 
         # Test FLAIR skull-stripped image
-        flair_stripped = pred_dir / f"{patient_id}_t2_brain_final.nii.gz"
+        flair_stripped = (
+            pred_dir / f"{patient_id}_space-MNI152_FLAIR_brain.nii.gz"
+        )
 
         if flair_stripped.exists():
             img = ants.image_read(str(flair_stripped))