Skip to content

Commit 576fe9b

Browse files
joewallwork, jatkinson1000, and Copilot
authored
Allow pre-trained models with pt2ts (#565)

* Support loading pretrained torchvision models
* Add model_weights argument
* Support setting precision
* Make use of pt2ts script in ResNet example
* Switch order of input in load_pytorch
* Add unit tests covering new functionality
* Add changelog entry
* Update pt2ts README and helptext

---------

Co-authored-by: Jack Atkinson <109271713+jatkinson1000@users.noreply.github.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
1 parent 88ba04d commit 576fe9b

12 files changed

Lines changed: 335 additions & 154 deletions

File tree

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@ For specific details see the [FTorch online documentation](https://cambridge-icc
2020
[#555](https://github.com/Cambridge-ICCS/FTorch/pull/555)
2121
- Changed process for saving TorchScript models
2222
[#555](https://github.com/Cambridge-ICCS/FTorch/pull/555)
23+
- Extended `pt2ts` to account for pre-trained torchvision models, weights, and
24+
precision
25+
[#565](https://github.com/Cambridge-ICCS/FTorch/pull/565)
2326
- Provide worked example and documentation on differentiating through calls to
2427
`torch_model_forward`.
2528
[#486](https://github.com/Cambridge-ICCS/FTorch/pull/486)

examples/03_ResNet/CMakeLists.txt

Lines changed: 22 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -35,21 +35,36 @@ target_link_libraries(resnet_infer_fortran PRIVATE FTorch::ftorch)
3535
if(CMAKE_BUILD_TESTS)
3636
include(CTest)
3737

38-
# 1. Check the PyTorch model runs and its outputs meet expectations
38+
# 1. Write the PyTorch model out and check it runs and its outputs meet
39+
# expectations
3940
add_test(
4041
NAME example_resnet_resnet18
4142
COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/resnet18.py
42-
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
43+
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
44+
45+
# 2. Run the pt2ts script to convert the model file to TorchScript format
46+
# and run basic checks
47+
add_test(
48+
NAME example_resnet_pt2ts
49+
COMMAND pt2ts resnet18
50+
--input_tensor_file
51+
${PROJECT_BINARY_DIR}/pytorch_resnet18_input_tensor_cpu.pt
52+
--output_model_file
53+
${PROJECT_BINARY_DIR}/torchscript_resnet18_model_cpu.pt
54+
--model_weights IMAGENET1K_V1
55+
--precision float32
56+
--test)
4357

44-
# 2. Check the model is saved to file in the expected location with the
45-
# write_torchscript.py script
58+
# 3. Check the model can be loaded from file and run in Python and that its
59+
# outputs meet expectations
4660
add_test(
47-
NAME example_resnet_write_torchscript
48-
COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/write_torchscript.py
61+
NAME example_resnet_resnet_infer_python
62+
COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/resnet_infer_python.py
4963
--filepath ${PROJECT_BINARY_DIR}
64+
--data_dir ${PROJECT_SOURCE_DIR}/data
5065
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
5166

52-
# 3. Check the model can be loaded from file and run in Fortran and that its
67+
# 4. Check the model can be loaded from file and run in Fortran and that its
5368
# outputs meet expectations
5469
add_test(
5570
NAME example_resnet_resnet_infer_fortran

examples/03_ResNet/resnet18.py

Lines changed: 106 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,21 @@
11
"""Load and run pretrained ResNet-18 from TorchVision."""
22

3+
import os
4+
from math import isclose
5+
36
import numpy as np
47
import torch
58
import torchvision
69
from PIL import Image
710

811

9-
# Initialize everything
1012
def initialize(precision: torch.dtype) -> torch.nn.Module:
1113
"""
1214
Download pre-trained ResNet-18 model and prepare for inference.
1315
16+
NOTE: These steps duplicate the process for loading pre-trained models in the pt2ts
17+
script and are provided here for testing.
18+
1419
Parameters
1520
----------
1621
precision: torch.dtype
@@ -36,78 +41,21 @@ def initialize(precision: torch.dtype) -> torch.nn.Module:
3641
return model
3742

3843

39-
def run_model(model: torch.nn.Module, precision: type) -> None:
40-
"""
41-
Run the pre-trained ResNet-18 with an example image of a dog.
42-
43-
Parameters
44-
----------
45-
model: torch.nn.Module
46-
Pretrained model to run.
47-
precision: type
48-
NumPy data type to save input tensor.
49-
"""
50-
# Transform image into the form expected by the pre-trained model, using the mean
51-
# and standard deviation from the ImageNet dataset
52-
# See: https://pytorch.org/vision/0.8/models.html
53-
image_filename = "data/dog.jpg"
54-
input_image = Image.open(image_filename)
55-
preprocess = torchvision.transforms.Compose(
56-
[
57-
torchvision.transforms.Resize(256),
58-
torchvision.transforms.CenterCrop(224),
59-
torchvision.transforms.ToTensor(),
60-
torchvision.transforms.Normalize(
61-
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
62-
),
63-
]
64-
)
65-
input_tensor = preprocess(input_image)
66-
input_batch = input_tensor.unsqueeze(0)
67-
68-
print("Saving input batch...", end="")
69-
# Transpose input before saving so order consistent with Fortran
70-
np_input = np.array(input_batch.numpy().transpose().flatten(), dtype=precision) # type: np.typing.NDArray
71-
72-
# Save data as binary
73-
np_input.tofile("data/image_tensor.dat")
74-
75-
# Load saved data to check it was saved correctly
76-
np_data = np.fromfile("data/image_tensor.dat", dtype=precision) # type: np.typing.NDArray
77-
78-
# Reshape to original tensor shape
79-
tensor_shape = np.array(input_batch.numpy()).transpose().shape
80-
np_data = np_data.reshape(tensor_shape)
81-
np_data = np_data.transpose()
82-
if not np.array_equal(np_data, input_batch.numpy()):
83-
result_error = (
84-
"Image read from saved file (data/image_tensor.dat) does not match"
85-
"processed data read from data/dog.jpg expected value."
86-
)
87-
raise ValueError(result_error)
88-
print("done.")
89-
90-
print("Running ResNet-18 model for input...", end="")
91-
with torch.inference_mode():
92-
output = model(input_batch)
93-
print("done.")
94-
95-
print_top_results(output)
96-
97-
98-
def print_top_results(output: torch.Tensor) -> None:
44+
def print_top_results(output: torch.Tensor, data_dir: str) -> None:
9945
"""Print top 5 results.
10046
10147
Parameters
10248
----------
10349
output: torch.Tensor
10450
Output from ResNet-18.
51+
data_dir : str
52+
Path to data directory
10553
"""
10654
# Run a softmax to get probabilities
10755
probabilities = torch.nn.functional.softmax(output[0], dim=0)
10856

10957
# Read ImageNet labels from text file
110-
cats_filename = "data/categories.txt"
58+
cats_filename = os.path.join(data_dir, "categories.txt")
11159
categories = np.genfromtxt(cats_filename, dtype=str, delimiter="\n")
11260

11361
# Show top categories per image
@@ -128,15 +76,53 @@ def print_top_results(output: torch.Tensor) -> None:
12876
]
12977
if not np.allclose(top5_prob, expected_prob, rtol=1e-5):
13078
result_error = (
131-
f"Predicted top 5 probabilities:\n{top5_prob}\ndo not match the"
132-
"expected values:\n{expected_prob}"
79+
f"Predicted top 5 probabilities:\n{top5_prob}\ndo not match the expected"
80+
f" values:\n{expected_prob}"
81+
)
82+
raise ValueError(result_error)
83+
84+
85+
def check_results(output: torch.Tensor) -> None:
86+
"""
87+
Compare top model output to expected result.
88+
89+
Parameters
90+
----------
91+
output: torch.Tensor
92+
Output from ResNet-18.
93+
"""
94+
# Run a softmax to get probabilities
95+
predicted_prob = torch.max(torch.nn.functional.softmax(output[0], dim=0))
96+
expected_prob = 0.8846225142478943
97+
if not isclose(predicted_prob, expected_prob, abs_tol=1e-5):
98+
result_error = (
99+
f"Predicted probability: {predicted_prob} does not match the expected"
100+
f" value: {expected_prob}."
133101
)
134102
raise ValueError(result_error)
135103

136104

137105
if __name__ == "__main__":
138-
np_precision = np.float32
106+
import argparse
139107

108+
# Parse user input
109+
parser = argparse.ArgumentParser(
110+
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
111+
)
112+
parser.add_argument(
113+
"--device_type",
114+
help="Device type to run the inference on",
115+
type=str,
116+
choices=["cpu", "cuda", "hip", "xpu", "mps"],
117+
default="cpu",
118+
)
119+
parsed_args = parser.parse_args()
120+
parsed_args = parser.parse_args()
121+
device_type = parsed_args.device_type
122+
data_dir = os.path.join(os.path.dirname(__file__), "data")
123+
124+
# Specify working precision
125+
np_precision = np.float32
140126
if np_precision == np.float32:
141127
torch_precision = torch.float32
142128
elif np_precision == np.float64:
@@ -145,5 +131,58 @@ def print_top_results(output: torch.Tensor) -> None:
145131
precision_mismatch = "`np_precision` must be type `np.float32` or `np.float64`"
146132
raise ValueError(precision_mismatch)
147133

148-
rn_model = initialize(torch_precision)
149-
run_model(rn_model, np_precision)
134+
# Initialize model on the specified device
135+
model = initialize(torch_precision).to(device_type)
136+
137+
# Transform image into the form expected by the pre-trained model, using the mean
138+
# and standard deviation from the ImageNet dataset
139+
# See: https://pytorch.org/vision/0.8/models.html
140+
image_filename = os.path.join(data_dir, "dog.jpg")
141+
input_image = Image.open(image_filename)
142+
preprocess = torchvision.transforms.Compose(
143+
[
144+
torchvision.transforms.Resize(256),
145+
torchvision.transforms.CenterCrop(224),
146+
torchvision.transforms.ToTensor(),
147+
torchvision.transforms.Normalize(
148+
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
149+
),
150+
]
151+
)
152+
input_tensor = preprocess(input_image)
153+
input_batch = input_tensor.unsqueeze(0)
154+
155+
print("Saving input batch...", end="")
156+
# Transpose input before saving so order consistent with Fortran
157+
np_input = np.array(input_batch.numpy().transpose().flatten(), dtype=np_precision) # type: np.typing.NDArray
158+
159+
# Save data as binary
160+
tensor_filename = os.path.join(data_dir, "image_tensor.dat")
161+
np_input.tofile(tensor_filename)
162+
163+
# Load saved data to check it was saved correctly
164+
np_data = np.fromfile(tensor_filename, dtype=np_precision) # type: np.typing.NDArray
165+
166+
# Reshape to original tensor shape
167+
tensor_shape = np.array(input_batch.numpy()).transpose().shape
168+
np_data = np_data.reshape(tensor_shape)
169+
np_data = np_data.transpose()
170+
if not np.array_equal(np_data, input_batch.numpy()):
171+
result_error = (
172+
f"Image read from saved file ({tensor_filename}) does not match processed"
173+
f" data read from {data_dir}/dog.jpg expected value."
174+
)
175+
raise ValueError(result_error)
176+
print("done.")
177+
178+
# Save the input tensor in PyTorch format
179+
input_batch = input_batch.to(device_type)
180+
torch.save(input_batch, f"pytorch_resnet18_input_tensor_{device_type}.pt")
181+
182+
# Run the model
183+
print("Running ResNet-18 model for input...", end="")
184+
with torch.inference_mode():
185+
output = model(input_batch)
186+
print("done.")
187+
print_top_results(output, data_dir)
188+
check_results(output)

examples/03_ResNet/resnet_infer_fortran.f90

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ subroutine main()
115115
deallocate(args)
116116

117117
if (.not. test_pass) then
118+
write (*,*) "Predicted probability does not match the expected value:", expected_prob
118119
stop 999
119120
end if
120121

examples/03_ResNet/resnet_infer_python.py

Lines changed: 16 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,15 @@
11
"""Load ResNet-18 saved to TorchScript and run inference with an example image."""
22

33
import os
4-
from math import isclose
54

65
import numpy as np
76
import torch
8-
from resnet18 import print_top_results
7+
from resnet18 import check_results, print_top_results
98

109

11-
def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
10+
def deploy(
11+
saved_model: str, device: str, data_dir: str, batch_size: int = 1
12+
) -> torch.Tensor:
1213
"""
1314
Load TorchScript ResNet-18 and run inference with Tensor from example image.
1415
@@ -18,6 +19,8 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
1819
location of ResNet-18 saved to Torchscript
1920
device : str
2021
Torch device to run model on, 'cpu' or 'cuda'
22+
data_dir : str
23+
Path to data directory
2124
batch_size : int
2225
batch size to run (default 1)
2326
@@ -29,7 +32,7 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
2932
transposed_shape = [224, 224, 3, batch_size]
3033
precision = np.float32
3134

32-
np_data = np.fromfile("data/image_tensor.dat", dtype=precision)
35+
np_data = np.fromfile(os.path.join(data_dir, "image_tensor.dat"), dtype=precision)
3336
np_data = np_data.reshape(transposed_shape)
3437
np_data = np_data.transpose()
3538
input_tensor = torch.from_numpy(np_data)
@@ -56,26 +59,6 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor:
5659
return output
5760

5861

59-
def check_results(output: torch.Tensor) -> None:
60-
"""
61-
Compare top model output to expected result.
62-
63-
Parameters
64-
----------
65-
output: torch.Tensor
66-
Output from ResNet-18.
67-
"""
68-
# Run a softmax to get probabilities
69-
predicted_prob = torch.max(torch.nn.functional.softmax(output[0], dim=0))
70-
expected_prob = 0.8846225142478943
71-
if not isclose(predicted_prob, expected_prob, abs_tol=1e-5):
72-
result_error = (
73-
f"Predicted probability: {predicted_prob} does not match the"
74-
"expected value: {expected_prob}."
75-
)
76-
raise ValueError(result_error)
77-
78-
7962
if __name__ == "__main__":
8063
import argparse
8164

@@ -88,15 +71,22 @@ def check_results(output: torch.Tensor) -> None:
8871
type=str,
8972
default=os.path.dirname(__file__),
9073
)
74+
parser.add_argument(
75+
"--data_dir",
76+
help="Path to the directory containing the input data",
77+
type=str,
78+
default=os.path.join(os.path.dirname(__file__), "data"),
79+
)
9180
parsed_args = parser.parse_args()
9281
filepath = parsed_args.filepath
82+
data_dir = parsed_args.data_dir
9383
saved_model_file = os.path.join(filepath, "torchscript_resnet18_model_cpu.pt")
9484

9585
device_to_run = "cpu"
9686

9787
batch_size_to_run = 1
9888

9989
with torch.inference_mode():
100-
result = deploy(saved_model_file, device_to_run, batch_size_to_run)
101-
print_top_results(result)
90+
result = deploy(saved_model_file, device_to_run, data_dir, batch_size_to_run)
91+
print_top_results(result, data_dir)
10292
check_results(result)

examples/04_Batching/README.md

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -171,6 +171,24 @@ pt2ts BatchingNet \
171171
```
172172
This should produce `torchscript_batchingnet_model_cpu.pt`.
173173

174+
A Python file `batchingnet.py` is provided that defines the simple PyTorch
175+
'net'. Running this file as a script will write the model out in PyTorch's
176+
`.pt` file format.
177+
```
178+
python3 batchingnet.py
179+
```
180+
You should find that a PyTorch model file `pytorch_batchingnet_model_cpu.pt` is
181+
created.
182+
183+
To convert the BatchingNet model to TorchScript for use in Fortran, run:
184+
```
185+
pt2ts BatchingNet \
186+
--model_definition_file batchingnet.py \
187+
--input_model_file pytorch_batchingnet_model_cpu.pt \
188+
--output_model_file torchscript_batchingnet_model_cpu.pt
189+
```
190+
This should produce `torchscript_batchingnet_model_cpu.pt`.
191+
174192
You can see how you would perform batching in PyTorch by running the Python batching
175193
demo:
176194
```

0 commit comments

Comments (0)