Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 22 additions & 19 deletions CaffeLoader.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,28 +193,31 @@ def buildSequential(channel_list, pooling):
'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],
}

name_dict = {
'vgg19': ['vgg-19', 'vgg19', 'vgg_19',],
'vgg16': ['vgg-16', 'vgg16', 'vgg_16', 'fcn32s', 'pruning', 'sod'],
}


def modelSelector(model_file, pooling):
vgg_list = ["fcn32s", "pruning", "sod", "vgg"]
if any(name in model_file for name in vgg_list):
if "pruning" in model_file:
print("VGG-16 Architecture Detected")
print("Using The Channel Pruning Model")
cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict
elif "fcn32s" in model_file:
if any(name in model_file for name in ['vgg'] + name_dict['vgg16'] + name_dict['vgg19']):
if any(name in model_file for name in name_dict['vgg16']):
print("VGG-16 Architecture Detected")
print("Using the fcn32s-heavy-pascal Model")
cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
elif "sod" in model_file:
print("VGG-16 Architecture Detected")
print("Using The SOD Finetune Model")
cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
elif "19" in model_file:
if "pruning" in model_file:
print("Using The Channel Pruning Model")
cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict
elif "fcn32s" in model_file:
print("Using the fcn32s-heavy-pascal Model")
cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
elif "sod" in model_file:
print("Using The SOD Finetune Model")
cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
elif "16" in model_file:
cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
elif any(name in model_file for name in name_dict['vgg19']):
print("VGG-19 Architecture Detected")
cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict
elif "16" in model_file:
print("VGG-16 Architecture Detected")
cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
if "19" in model_file:
cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict
else:
raise ValueError("VGG architecture not recognized.")
elif "nin" in model_file:
Expand Down Expand Up @@ -251,4 +254,4 @@ def loadCaffemodel(model_file, pooling, use_gpu, disable_check):

print_loadcaffe(cnn, layerList)

return cnn, layerList
return cnn, layerList
2 changes: 1 addition & 1 deletion INSTALL.md
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ Then using https://pytorch.org/, get the correct pip command, paste it into the


```
pip3 install torch===1.3.0 torchvision===0.4.1 -f https://download.pytorch.org/whl/torch_stable.html
pip3 install torch===1.3.1 torchvision===0.4.2 -f https://download.pytorch.org/whl/torch_stable.html
```


Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ path or a full absolute path.
when using ADAM you will probably need to play with other parameters to get good results, especially
the style weight, content weight, and learning rate.
* `-learning_rate`: Learning rate to use with the ADAM optimizer. Default is 1e1.
* `-normalize_weights`: If this flag is present, style and content weights will be divided by the number of channels for each layer. Idea from [PytorchNeuralStyleTransfer](https://github.com/leongatys/PytorchNeuralStyleTransfer).

**Output options**:
* `-output_image`: Name of the output image. Default is `out.png`.
Expand Down