model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class VoiceFilter(nn.Module):
    def __init__(self, hp):
        super(VoiceFilter, self).__init__()
        self.hp = hp
        assert hp.audio.n_fft // 2 + 1 == hp.audio.num_freq == hp.model.fc2_dim, \
            "stft-related dimension mismatch"
        self.conv = nn.Sequential(
            # cnn1
            nn.ZeroPad2d((3, 3, 0, 0)),
            nn.Conv2d(1, 64, kernel_size=(1, 7), dilation=(1, 1)),
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn2
            nn.ZeroPad2d((0, 0, 3, 3)),
            nn.Conv2d(64, 64, kernel_size=(7, 1), dilation=(1, 1)),
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn3
            nn.ZeroPad2d(2),
            nn.Conv2d(64, 64, kernel_size=(5, 5), dilation=(1, 1)),
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn4
            nn.ZeroPad2d((2, 2, 4, 4)),
            nn.Conv2d(64, 64, kernel_size=(5, 5), dilation=(2, 1)),  # effective kernel: (9, 5)
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn5
            nn.ZeroPad2d((2, 2, 8, 8)),
            nn.Conv2d(64, 64, kernel_size=(5, 5), dilation=(4, 1)),  # effective kernel: (17, 5)
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn6
            nn.ZeroPad2d((2, 2, 16, 16)),
            nn.Conv2d(64, 64, kernel_size=(5, 5), dilation=(8, 1)),  # effective kernel: (33, 5)
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn7
            nn.ZeroPad2d((2, 2, 32, 32)),
            nn.Conv2d(64, 64, kernel_size=(5, 5), dilation=(16, 1)),  # effective kernel: (65, 5)
            nn.BatchNorm2d(64), nn.ReLU(),

            # cnn8
            nn.Conv2d(64, 8, kernel_size=(1, 1), dilation=(1, 1)),
            nn.BatchNorm2d(8), nn.ReLU(),
        )
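
        # The 8 CNN output channels are flattened across frequency and
        # concatenated with the speaker d-vector at every frame, hence the
        # 8*num_freq + emb_dim input size; bidirectional=True doubles the
        # LSTM output to 2*lstm_dim.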
        self.lstm = nn.LSTM(
            8 * hp.audio.num_freq + hp.embedder.emb_dim,
            hp.model.lstm_dim,
            batch_first=True,
            bidirectional=True)

        self.fc1 = nn.Linear(2 * hp.model.lstm_dim, hp.model.fc1_dim)
        self.fc2 = nn.Linear(hp.model.fc1_dim, hp.model.fc2_dim)

    def forward(self, x, dvec):
        # x: [B, T, num_freq]
        x = x.unsqueeze(1)
        # x: [B, 1, T, num_freq]
        x = self.conv(x)
        # x: [B, 8, T, num_freq]
        x = x.transpose(1, 2).contiguous()
        # x: [B, T, 8, num_freq]
        x = x.view(x.size(0), x.size(1), -1)
        # x: [B, T, 8*num_freq]

        # dvec: [B, emb_dim]
        dvec = dvec.unsqueeze(1)
        dvec = dvec.repeat(1, x.size(1), 1)
        # dvec: [B, T, emb_dim]

        x = torch.cat((x, dvec), dim=2)  # [B, T, 8*num_freq + emb_dim]

        x, _ = self.lstm(x)  # [B, T, 2*lstm_dim]
        x = F.relu(x)
        x = self.fc1(x)  # [B, T, fc1_dim]
        x = F.relu(x)
        x = self.fc2(x)  # [B, T, fc2_dim], fc2_dim == num_freq
        x = torch.sigmoid(x)
        return x
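
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). The hyperparameter
# values below are assumptions chosen only to satisfy the assert in __init__
# (n_fft // 2 + 1 == num_freq == fc2_dim); the real project reads them from
# its YAML config.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    hp = SimpleNamespace(
        audio=SimpleNamespace(n_fft=1200, num_freq=601),
        model=SimpleNamespace(lstm_dim=400, fc1_dim=600, fc2_dim=601),
        embedder=SimpleNamespace(emb_dim=256),
    )
    model = VoiceFilter(hp)

    batch, frames = 2, 301
    spec = torch.randn(batch, frames, hp.audio.num_freq)  # noisy magnitude spectrogram
    dvec = torch.randn(batch, hp.embedder.emb_dim)        # speaker d-vector
    mask = model(spec, dvec)                              # soft mask in (0, 1)
    print(mask.shape)  # torch.Size([2, 301, 601])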