This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision | ||
products:sbc:vim4:npu:demos:retinaface [2024/01/04 05:16] louis |
products:sbc:vim4:npu:demos:retinaface [2024/01/29 02:16] (current) louis |
||
---|---|---|---|
Line 54: | Line 54: | ||
==== Convert ==== | ==== Convert ==== | ||
- | After training the model, we should convert the PyTorch model into an ONNX model. | + | After training the model, we should convert the PyTorch model into an ONNX model. Create the Python conversion script as follows and run it. |
- | + | ||
- | Copy '' | + | |
- | + | ||
- | ```diff | + | |
- | class ClassHead(nn.Module): | + | |
- | def __init__(self, | + | |
- | super(ClassHead, | + | |
- | self.num_anchors = num_anchors | + | |
- | self.conv1x1 = nn.Conv2d(inchannels, | + | |
- | + | ||
- | def forward(self, | + | |
- | out = self.conv1x1(x) | + | |
- | - out = out.permute(0, | + | |
- | + out = out.contiguous() | + | |
- | + | ||
- | - | + | |
- | + | + | |
- | + | ||
- | class BboxHead(nn.Module): | + | |
- | def __init__(self, | + | |
- | super(BboxHead, | + | |
- | self.conv1x1 = nn.Conv2d(inchannels, | + | |
- | + | ||
- | def forward(self, | + | |
- | out = self.conv1x1(x) | + | |
- | - out = out.permute(0, | + | |
- | + out = out.contiguous() | + | |
- | + | ||
- | - | + | |
- | + | + | |
- | + | ||
- | class LandmarkHead(nn.Module): | + | |
- | def __init__(self, | + | |
- | super(LandmarkHead, | + | |
- | self.conv1x1 = nn.Conv2d(inchannels, | + | |
- | + | ||
- | def forward(self, | + | |
- | out = self.conv1x1(x) | + | |
- | - out = out.permute(0, | + | |
- | + out = out.contiguous() | + | |
- | + | ||
- | - | + | |
- | + | + | |
- | ``` | + | |
- | + | ||
- | ```diff | + | |
- | - | + | |
- | - | + | |
- | - | + | |
- | + | + | |
- | + | + | |
- | + | + | |
- | + | ||
- | if self.mode == ' | + | |
- | output = (bbox_regressions, | + | |
- | else: | + | |
- | - | + | |
- | + | + | |
- | return output | + | |
- | ``` | + | |
- | + | ||
- | Create the Python conversion script as follows and run. | + | |
```python export.py | ```python export.py | ||
import torch | import torch | ||
import numpy as np | import numpy as np | ||
- | from nets.retinaface_export | + | from nets.retinaface |
from utils.config import cfg_mnet, cfg_re50 | from utils.config import cfg_mnet, cfg_re50 | ||
Line 151: | Line 89: | ||
--inputs " | --inputs " | ||
--input-shapes | --input-shapes | ||
- | --inference-input-type float32 \ | ||
- | --inference-output-type float32 \ | ||
--dtypes " | --dtypes " | ||
+ | --inference-input-type float32 \ | ||
+ | --inference-output-type float32 \ | ||
--quantize-dtype int8 --outdir onnx_output | --quantize-dtype int8 --outdir onnx_output | ||
--channel-mean-value " | --channel-mean-value " | ||
- | --source-file ./dataset.txt \ | + | --source-file ./retinaface_dataset.txt \ |
--iterations 500 \ | --iterations 500 \ | ||
--disable-per-channel False \ | --disable-per-channel False \ | ||
--batch-size 1 --target-platform PRODUCT_PID0XA003 | --batch-size 1 --target-platform PRODUCT_PID0XA003 | ||
``` | ``` | ||
- | |||
- | <WRAP important > | ||
- | Please prepare about 500 pictures for quantization. If the picture size is smaller than the model input size, please resize the pictures to the input size before quantization. | ||
- | </ | ||
Run '' | Run '' | ||
Line 181: | Line 115: | ||
$ git clone https:// | $ git clone https:// | ||
``` | ``` | ||
- | |||
- | <WRAP important > | ||
- | If your kernel version is 5.4 or earlier, please use tag '' | ||
- | </ | ||
==== Install dependencies ==== | ==== Install dependencies ==== | ||
Line 224: | Line 154: | ||
# Run | # Run | ||
- | $ sudo ./ | + | $ sudo ./ |
``` | ``` | ||
'' | '' | ||