|
| 1 | +import sys, os |
| 2 | +from typing import List |
| 3 | + |
| 4 | +import numpy as np |
| 5 | +import pathlib |
| 6 | +import torch |
| 7 | +import torch.nn as nn |
| 8 | +import torchvision |
| 9 | +import torchxrayvision as xrv |
| 10 | +from ... import utils |
| 11 | + |
class SexModel(nn.Module):
    """This model is from the MIRA (Medical Image Representation and Analysis)
    project and is trained to predict patient sex from a chest X-ray. The model
    uses a ResNet34 architecture and is trained on CheXpert dataset. The
    native resolution of the model is 224x224. Images are scaled automatically.

    `Demo notebook <https://github.com/mlmed/torchxrayvision/blob/main/scripts/sex_prediction.ipynb>`__

    Publication: `Algorithmic encoding of protected characteristics in chest X-ray disease detection models <https://www.thelancet.com/journals/ebiom/article/PIIS2352-3964(23)00032-4/fulltext>`__
    B. Glocker, C. Jones, M. Bernhardt, S. Winzeck
    eBioMedicine. Volume 89, 104467, 2023.

    .. code-block:: python

        model = xrv.baseline_models.mira.SexModel()

        image = xrv.utils.load_image('00027426_000.png')
        image = torch.from_numpy(image)[None,...]

        pred = model(image)

        model.targets[torch.argmax(pred)]
        # 'Male' or 'Female'

    .. code-block:: bibtex

        @article{MIRA2023,
            title = {Chexploration: Medical Image Representation and Analysis},
            author = {MIRA Team},
            journal = {biomedia-mira/chexploration},
            url = {https://github.com/biomedia-mira/chexploration},
            year = {2023}
        }

    """

    # Output index 0 -> "Male", index 1 -> "Female"; order must match training.
    targets: List[str] = ["Male", "Female"]
    """"""

    def __init__(self, weights: bool = True):
        """Build the ResNet34 classifier.

        :param weights: when True (default), download and load the pretrained
            MIRA checkpoint; when False, leave the network randomly initialized.
        """
        super().__init__()

        # ResNet34 backbone as in the original MIRA implementation, with the
        # ImageNet classifier head replaced by a 2-way (Male/Female) layer.
        self.model = torchvision.models.resnet34(weights=None)
        n_classes = len(self.targets)
        num_features = self.model.fc.in_features  # 512 for ResNet34
        self.model.fc = nn.Linear(num_features, n_classes)

        if weights:
            self._load_pretrained_weights()

        self.model = self.model.eval()  # Must be in eval mode to work correctly

    def _load_pretrained_weights(self):
        """Download the MIRA checkpoint if absent, then load it into self.model.

        Sets ``self.weights_filename_local`` to the cached checkpoint path.
        Raises whatever exception the download or the state-dict load produced,
        after printing a diagnostic message.
        """
        url = 'https://github.com/mlmed/torchxrayvision/releases/download/v1/mira_sex_resnet-all_epoch_13-step_7125.ckpt'

        weights_filename = "mira_sex_resnet-all_epoch_13-step_7125.ckpt"
        weights_storage_folder = os.path.expanduser(os.path.join("~", ".torchxrayvision", "models_data"))
        self.weights_filename_local = os.path.join(weights_storage_folder, weights_filename)

        if not os.path.isfile(self.weights_filename_local):
            print("Downloading weights...")
            print("If this fails you can run `wget {} -O {}`".format(url, self.weights_filename_local))
            pathlib.Path(weights_storage_folder).mkdir(parents=True, exist_ok=True)
            try:
                xrv.utils.download(url, self.weights_filename_local)
            except Exception as e:
                print(f"Failed to download weights from {url}")
                print(f"Please manually place the weights file '{weights_filename}' in {weights_storage_folder}")
                raise e

        try:
            # NOTE(review): torch.load unpickles the downloaded file; the
            # release URL above is treated as a trusted source.
            ckpt = torch.load(self.weights_filename_local, map_location="cpu")

            # PyTorch Lightning checkpoints nest the weights under 'state_dict';
            # a plain PyTorch checkpoint is the state dict itself.
            state_dict = ckpt['state_dict'] if 'state_dict' in ckpt else ckpt

            # Lightning also prefixes parameter names with the attribute name
            # of the wrapped module ('model.'); strip it so the keys match the
            # bare torchvision ResNet34.
            prefix = 'model.'
            state_dict = {
                (key[len(prefix):] if key.startswith(prefix) else key): value
                for key, value in state_dict.items()
            }
            self.model.load_state_dict(state_dict)

        except Exception as e:
            print("Loading failure. Check weights file:", self.weights_filename_local)
            print("Error:", str(e))
            raise e

    def forward(self, x):
        """Compute sex logits for a batch of chest X-rays.

        :param x: tensor of shape (N, 1, H, W) in the torchxrayvision
            intensity range [-1024, 1024].
        :return: (N, 2) logit tensor ordered as ``self.targets``.
        """
        # ResNet expects 3 channels; tile the single grayscale channel
        # (pseudo-RGB as in the original implementation).
        x = x.repeat(1, 3, 1, 1)

        # Resize to the model's native 224x224 resolution.
        x = utils.fix_resolution(x, 224, self)
        utils.warn_normalization(x)

        # Map the torchxrayvision range [-1024, 1024] to [0, 255], the range
        # used by the MIRA training pipeline.
        x = (x + 1024) / 2048 * 255

        return self.model(x)

    def __repr__(self):
        return "MIRA-SexModel-resnet34"