-
Notifications
You must be signed in to change notification settings - Fork 661
/
OmniScaleCNN.py
121 lines (97 loc) · 4.49 KB
/
OmniScaleCNN.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/051_models.OmniScaleCNN.ipynb.
# %% auto 0
__all__ = ['SampaddingConv1D_BN', 'build_layer_with_layer_parameter', 'OmniScaleCNN', 'get_Prime_number_in_a_range',
'get_out_channel_number', 'generate_layer_parameter_list']
# %% ../../nbs/051_models.OmniScaleCNN.ipynb 3
from ..imports import *
from .layers import *
from .utils import *
# %% ../../nbs/051_models.OmniScaleCNN.ipynb 4
#This is an unofficial PyTorch implementation by Ignacio Oguiza - [email protected] based on:
# Rußwurm, M., & Körner, M. (2019). Self-attention for raw optical satellite time series classification. arXiv preprint arXiv:1910.10536.
# Official implementation: https://github.com/dl4sits/BreizhCrops/blob/master/breizhcrops/models/OmniScaleCNN.py
class SampaddingConv1D_BN(Module):
    """Conv1d with "same" padding followed by BatchNorm1d (no activation)."""
    def __init__(self, in_channels, out_channels, kernel_size):
        # "Same" padding for both odd and even kernels: (k-1)//2 on the left,
        # k//2 on the right, so the output length equals the input length.
        left_pad = int((kernel_size - 1) / 2)
        right_pad = int(kernel_size / 2)
        self.padding = nn.ConstantPad1d((left_pad, right_pad), 0)
        self.conv1d = torch.nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size)
        self.bn = nn.BatchNorm1d(num_features=out_channels)

    def forward(self, x):
        # pad -> conv -> batchnorm; the activation is applied by the caller
        return self.bn(self.conv1d(self.padding(x)))
class build_layer_with_layer_parameter(Module):
    """One OmniScale block: parallel SampaddingConv1D_BN branches whose
    outputs are concatenated along the channel axis and passed through ReLU.
    """
    def __init__(self, layer_parameters):
        """`layer_parameters` is an iterable of
        (in_channels, out_channels, kernel_size) tuples, one per branch.
        """
        self.conv_list = nn.ModuleList(
            SampaddingConv1D_BN(in_ch, out_ch, ks)
            for in_ch, out_ch, ks in layer_parameters
        )

    def forward(self, x):
        # Run every branch on the same input, then fuse along channels.
        branch_outputs = [conv(x) for conv in self.conv_list]
        return F.relu(torch.cat(branch_outputs, 1))
class OmniScaleCNN(Module):
    """Omni-Scale CNN: stacked multi-kernel-size conv blocks, global average
    pooling, and a linear head (skipped when `few_shot` is True).

    Args:
        c_in: number of input channels (time-series variables).
        c_out: number of output classes.
        seq_len: input sequence length; kernel sizes go up to seq_len // 4.
        layers: per-block parameter budgets; defaults to the budgets used by
            the original implementation.
        few_shot: if True, `forward` returns the pooled features and the
            linear head is not applied.
    """
    def __init__(self, c_in, c_out, seq_len, layers=None, few_shot=False):
        # Avoid a mutable default argument; None stands in for the
        # original default budgets.
        if layers is None:
            layers = [8 * 128, 5 * 128 * 256 + 2 * 256 * 128]
        # Largest candidate kernel size is a quarter of the sequence length.
        receptive_field_shape = seq_len // 4
        layer_parameter_list = generate_layer_parameter_list(1, receptive_field_shape, layers, in_channel=c_in)
        self.few_shot = few_shot
        self.layer_parameter_list = layer_parameter_list
        self.layer_list = [build_layer_with_layer_parameter(params) for params in layer_parameter_list]
        self.net = nn.Sequential(*self.layer_list)
        self.gap = GAP1d(1)
        # The head's input width is the sum of the last block's branch widths.
        out_put_channel_number = sum(p[1] for p in layer_parameter_list[-1])
        self.hidden = nn.Linear(out_put_channel_number, c_out)

    def forward(self, x):
        x = self.net(x)
        x = self.gap(x)
        if not self.few_shot:
            x = self.hidden(x)
        return x
def get_Prime_number_in_a_range(start, end):
    """Return the list of candidate kernel sizes in [start, end].

    NOTE: values below 2 are appended unconditionally — in the original code
    their inner loop never executed, so 1 (the value `start` is always called
    with here) is deliberately included as a "prime" kernel size. Do not
    "fix" this: the architecture depends on kernel size 1 being present.

    Trial division only needs divisors up to floor(sqrt(val)); this is a
    pure speed-up with identical output.
    """
    Prime_list = []
    for val in range(start, end + 1):
        if val < 2:
            # Preserves original behavior for 0, 1 (and negatives).
            Prime_list.append(val)
            continue
        prime_or_not = True
        for n in range(2, int(val ** 0.5) + 1):
            if (val % n) == 0:
                prime_or_not = False
                break
        if prime_or_not:
            Prime_list.append(val)
    return Prime_list
def get_out_channel_number(paramenter_layer, in_channel, prime_list):
    """Per-branch output channels for a layer's parameter budget.

    Divides the budget by (in_channel * sum of kernel sizes), floors the
    result, and enforces a minimum of one channel.
    """
    budget_per_branch = paramenter_layer / (in_channel * sum(prime_list))
    return max(1, int(budget_per_branch))
def generate_layer_parameter_list(start, end, layers, in_channel=1):
    """Build one (in_channels, out_channels, kernel_size) tuple list per block.

    Kernel sizes are the values returned by get_Prime_number_in_a_range
    (which includes 1 when start == 1). One block is created per budget in
    `layers`, then a final block with kernel sizes 1 and 2 is appended.
    """
    prime_list = get_Prime_number_in_a_range(start, end)
    layer_parameter_list = []
    for layer_budget in layers:
        out_channel = get_out_channel_number(layer_budget, in_channel, prime_list)
        layer_parameter_list.append([(in_channel, out_channel, k) for k in prime_list])
        # The next block's input is the concatenation of every branch output.
        in_channel = len(prime_list) * out_channel
    # Final block: kernel sizes 1 and 2, width derived from the first budget
    # with a single input channel.
    first_out_channel = len(prime_list) * get_out_channel_number(layers[0], 1, prime_list)
    layer_parameter_list.append([(in_channel, first_out_channel, 1),
                                 (in_channel, first_out_channel, 2)])
    return layer_parameter_list