Skip to content
This repository was archived by the owner on Aug 28, 2024. It is now read-only.

Commit 3e720e7

Browse files
committed
updated code, script, Podfile, and readme for Image Segmentation for lite interpreter
1 parent f5578a4 commit 3e720e7

File tree

6 files changed

+26
-20
lines changed

6 files changed

+26
-20
lines changed

ImageSegmentation/ImageSegmentation.xcodeproj/project.pbxproj

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,11 @@
1414
265BAFEF253A6A6800467AC4 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 265BAFED253A6A6800467AC4 /* Main.storyboard */; };
1515
265BAFF1253A6A6900467AC4 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 265BAFF0253A6A6900467AC4 /* Assets.xcassets */; };
1616
265BAFF4253A6A6900467AC4 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 265BAFF2253A6A6900467AC4 /* LaunchScreen.storyboard */; };
17-
265BB005253A6B6200467AC4 /* deeplabv3_scripted.pt in Resources */ = {isa = PBXBuildFile; fileRef = 265BB004253A6B6200467AC4 /* deeplabv3_scripted.pt */; };
1817
265BB008253A6B9600467AC4 /* deeplab.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 265BB007253A6B9600467AC4 /* deeplab.jpg */; };
1918
265BB00E253A6E0E00467AC4 /* UIImage+Helper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */; };
2019
265BB017253A7F0500467AC4 /* TorchModule.mm in Sources */ = {isa = PBXBuildFile; fileRef = 265BB015253A7F0500467AC4 /* TorchModule.mm */; };
2120
265F9A6F2551CB3700B8F2EC /* dog.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 265F9A6E2551CB3700B8F2EC /* dog.jpg */; };
21+
266A451D267974C300548578 /* deeplabv3_scripted.ptl in Resources */ = {isa = PBXBuildFile; fileRef = 266A451C267974C300548578 /* deeplabv3_scripted.ptl */; };
2222
/* End PBXBuildFile section */
2323

2424
/* Begin PBXFileReference section */
@@ -33,12 +33,12 @@
3333
265BAFF3253A6A6900467AC4 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; };
3434
265BAFF5253A6A6900467AC4 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
3535
265BAFFF253A6B1200467AC4 /* ImageSegmentation-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "ImageSegmentation-Bridging-Header.h"; sourceTree = "<group>"; };
36-
265BB004253A6B6200467AC4 /* deeplabv3_scripted.pt */ = {isa = PBXFileReference; lastKnownFileType = file; path = deeplabv3_scripted.pt; sourceTree = "<group>"; };
3736
265BB007253A6B9600467AC4 /* deeplab.jpg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = deeplab.jpg; sourceTree = "<group>"; };
3837
265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "UIImage+Helper.swift"; sourceTree = "<group>"; };
3938
265BB015253A7F0500467AC4 /* TorchModule.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = TorchModule.mm; sourceTree = "<group>"; };
4039
265BB016253A7F0500467AC4 /* TorchModule.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TorchModule.h; sourceTree = "<group>"; };
4140
265F9A6E2551CB3700B8F2EC /* dog.jpg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = dog.jpg; sourceTree = "<group>"; };
41+
266A451C267974C300548578 /* deeplabv3_scripted.ptl */ = {isa = PBXFileReference; lastKnownFileType = file; path = deeplabv3_scripted.ptl; sourceTree = "<group>"; };
4242
/* End PBXFileReference section */
4343

4444
/* Begin PBXFrameworksBuildPhase section */
@@ -92,7 +92,7 @@
9292
265BB015253A7F0500467AC4 /* TorchModule.mm */,
9393
265BB00D253A6E0E00467AC4 /* UIImage+Helper.swift */,
9494
265BAFFF253A6B1200467AC4 /* ImageSegmentation-Bridging-Header.h */,
95-
265BB004253A6B6200467AC4 /* deeplabv3_scripted.pt */,
95+
266A451C267974C300548578 /* deeplabv3_scripted.ptl */,
9696
265BB007253A6B9600467AC4 /* deeplab.jpg */,
9797
265F9A6E2551CB3700B8F2EC /* dog.jpg */,
9898
);
@@ -161,8 +161,8 @@
161161
265BAFF4253A6A6900467AC4 /* LaunchScreen.storyboard in Resources */,
162162
265BB008253A6B9600467AC4 /* deeplab.jpg in Resources */,
163163
265BAFF1253A6A6900467AC4 /* Assets.xcassets in Resources */,
164-
265BB005253A6B6200467AC4 /* deeplabv3_scripted.pt in Resources */,
165164
265BAFEF253A6A6800467AC4 /* Main.storyboard in Resources */,
165+
266A451D267974C300548578 /* deeplabv3_scripted.ptl in Resources */,
166166
);
167167
runOnlyForDeploymentPostprocessing = 0;
168168
};

ImageSegmentation/ImageSegmentation/TorchModule.mm

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,18 +8,18 @@
88
#import "UIImageHelper.h"
99
#import <CoreImage/CoreImage.h>
1010
#import <ImageIO/ImageIO.h>
11-
#import <LibTorch/LibTorch.h>
11+
#import <Libtorch-Lite/Libtorch-Lite.h>
1212

1313
@implementation TorchModule {
1414
@protected
15-
torch::jit::script::Module _impl;
15+
torch::jit::mobile::Module _impl;
1616
}
1717

1818
- (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
1919
self = [super init];
2020
if (self) {
2121
try {
22-
_impl = torch::jit::load(filePath.UTF8String);
22+
_impl = torch::jit::_load_for_mobile(filePath.UTF8String);
2323
_impl.eval();
2424
} catch (const std::exception& exception) {
2525
NSLog(@"%s", exception.what());

ImageSegmentation/ImageSegmentation/ViewController.swift

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ class ViewController: UIViewController {
1010

1111
private lazy var module: TorchModule = {
1212
if let filePath = Bundle.main.path(forResource:
13-
"deeplabv3_scripted", ofType: "pt"),
13+
"deeplabv3_scripted", ofType: "ptl"),
1414
let module = TorchModule(fileAtPath: filePath) {
1515
return module
1616
} else {

ImageSegmentation/Podfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,5 +6,5 @@ target 'ImageSegmentation' do
66
use_frameworks!
77

88
# Pods for ImageSegmentation
9-
pod 'LibTorch', '~>1.7.0'
9+
pod 'LibTorch-Lite', '~>1.9.0'
1010
end

ImageSegmentation/README.md

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,15 @@
1-
# Semantic Image Segmentation DeepLabV3 on iOS
1+
# Semantic Image Segmentation DeepLabV3 with Mobile Interpreter on iOS
22

33
## Introduction
44

5-
This repo offers a Python script that converts the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) for mobile apps and an iOS app that uses the model to segment images.
5+
This repo offers a Python script that converts the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) to the Mobile Interpreter version and an iOS app that uses the model to segment images.
6+
7+
## Prerequisites
8+
9+
* PyTorch 1.9.0 and torchvision 0.10.0 (Optional)
10+
* Python 3.8 or above (Optional)
11+
* iOS Cocoapods LibTorch-Lite 1.9.0
12+
* Xcode 12.4 or later
613

714
## Quick Start
815

@@ -14,19 +21,19 @@ If you don't have the PyTorch environment set up to run the script below to gene
1421

1522
Be aware that the downloadable model file was created with PyTorch 1.9.0, matching the iOS LibTorch-Lite library 1.9.0 specified in the `Podfile`. If you use a different version of PyTorch to create your model by following the instructions below, make sure you specify the same iOS LibTorch-Lite version in the `Podfile` to avoid possible errors caused by the version mismatch. Furthermore, if you want to use the latest prototype features in the PyTorch master branch to create the model, follow the steps at [Building PyTorch iOS Libraries from Source](https://pytorch.org/mobile/ios/#build-pytorch-ios-libraries-from-source) on how to use the model in iOS.
1623

17-
Open a Mac Terminal, run the following commands:
24+
Open a Mac Terminal, first install PyTorch 1.9.0 and torchvision 0.10.0 using a command such as `pip install torch torchvision`, then run the following commands:
1825

1926
```
2027
git clone https://github.com/pytorch/ios-demo-app
2128
cd ios-demo-app/ImageSegmentation
2229
python deeplabv3.py
2330
```
2431

25-
The Python script `deeplabv3.py` is used to generate the TorchScript-formatted model for mobile apps. Then run `mv deeplabv3_scripted.pt ImageSegmentation` to move the model file to the right location.
32+
The Python script `deeplabv3.py` is used to generate the Lite Interpreter model file `deeplabv3_scripted.ptl` to be used in iOS.
2633

2734
### 2. Use LibTorch
2835

29-
Run the commands below:
36+
Run the commands below (note the `Podfile` uses `pod 'LibTorch-Lite', '~>1.9.0'`):
3037

3138
```
3239
pod install
@@ -36,8 +43,6 @@ open ImageSegmentation.xcworkspace/
3643
### 3. Run the app
3744
Select an iOS simulator or device on Xcode to run the app. The example image and its segmented result are as follows:
3845

39-
results are:
40-
4146
![](screenshot1.png)
4247
![](screenshot2.png)
4348

@@ -46,3 +51,5 @@ Note that the `resized` method in `UIImage+Helper.swift` is used to speed up the
4651
## Tutorial
4752

4853
Read the tutorial [here](https://pytorch.org/tutorials/beginner/deeplabv3_on_ios.html) for detailed step-by-step instructions of how to prepare and run the [PyTorch DeepLabV3 model](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101) on iOS, as well as practical tips on how to successfully use a pre-trained PyTorch model on iOS and avoid common pitfalls.
54+
55+
For more information on using the Mobile Interpreter on Android, see the tutorial [here](https://pytorch.org/tutorials/recipes/mobile_interpreter.html).

ImageSegmentation/deeplabv3.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
11
import torch
22

3-
model = torch.hub.load('pytorch/vision:v0.7.0', 'deeplabv3_resnet50', pretrained=True)
3+
model = torch.hub.load('pytorch/vision:v0.9.0', 'deeplabv3_resnet50', pretrained=True)
44
model.eval()
55

6-
scriptedm = torch.jit.script(model)
7-
torch.jit.save(scriptedm, "deeplabv3_scripted.pt")
8-
6+
scripted_module = torch.jit.script(model)
7+
scripted_module._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")

0 commit comments

Comments
 (0)