Commit 9cedd05

[MNT] Code formatting
1 parent 9a7b561 commit 9cedd05

23 files changed

Lines changed: 1012 additions & 751 deletions
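
The commit message does not name a formatter, but the rewrites below (exploded call arguments, trailing commas, spaces around arithmetic operators, lines kept under 79 characters) are consistent with running black with a 79-character line limit. A minimal sketch of reproducing one of the hunks below under that assumption, using black's library API:

import black

# One of the pre-format lines from classification_cifar10_cnn.py below.
src = (
    'records.append(("FusionClassifier", training_time, evaluating_time,\n'
    "                testing_acc))\n"
)

# Assumption: a 79-character limit; black's default is 88.
print(black.format_str(src, mode=black.Mode(line_length=79)))

Running this prints the wrapped records.append(...) form that appears on the + lines of the corresponding hunk.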

examples/classification_cifar10_cnn.py

Lines changed: 39 additions & 32 deletions
@@ -28,7 +28,6 @@ def display_records(records, logger):
 
 
 class LeNet5(nn.Module):
-
     def __init__(self):
         super(LeNet5, self).__init__()
         self.conv1 = nn.Conv2d(3, 6, 5)
@@ -68,22 +67,25 @@ def forward(self, x):
         transforms.RandomHorizontalFlip(),
         transforms.RandomCrop(32, 4),
         transforms.ToTensor(),
-        transforms.Normalize((0.4914, 0.4822, 0.4465),
-                             (0.2023, 0.1994, 0.2010)),
+        transforms.Normalize(
+            (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
+        ),
     ]
 )
 
 test_transformer = transforms.Compose(
     [
         transforms.ToTensor(),
-        transforms.Normalize((0.4914, 0.4822, 0.4465),
-                             (0.2023, 0.1994, 0.2010)),
+        transforms.Normalize(
+            (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
+        ),
     ]
 )
 
 train_loader = DataLoader(
-    datasets.CIFAR10(data_dir, train=True, download=True,
-                     transform=train_transformer),
+    datasets.CIFAR10(
+        data_dir, train=True, download=True, transform=train_transformer
+    ),
     batch_size=batch_size,
     shuffle=True,
 )
@@ -98,9 +100,7 @@ def forward(self, x):
 
 # FusionClassifier
 model = FusionClassifier(
-    estimator=LeNet5,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=LeNet5, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -118,14 +118,13 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("FusionClassifier", training_time, evaluating_time,
-                testing_acc))
+records.append(
+    ("FusionClassifier", training_time, evaluating_time, testing_acc)
+)
 
 # VotingClassifier
 model = VotingClassifier(
-    estimator=LeNet5,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=LeNet5, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -141,14 +140,13 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("VotingClassifier", training_time, evaluating_time,
-                testing_acc))
+records.append(
+    ("VotingClassifier", training_time, evaluating_time, testing_acc)
+)
 
 # BaggingClassifier
 model = BaggingClassifier(
-    estimator=LeNet5,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=LeNet5, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -164,14 +162,13 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("BaggingClassifier", training_time, evaluating_time,
-                testing_acc))
+records.append(
+    ("BaggingClassifier", training_time, evaluating_time, testing_acc)
+)
 
 # GradientBoostingClassifier
 model = GradientBoostingClassifier(
-    estimator=LeNet5,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=LeNet5, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -187,14 +184,18 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("GradientBoostingClassifier", training_time,
-                evaluating_time, testing_acc))
+records.append(
+    (
+        "GradientBoostingClassifier",
+        training_time,
+        evaluating_time,
+        testing_acc,
+    )
+)
 
 # SnapshotEnsembleClassifier
 model = SnapshotEnsembleClassifier(
-    estimator=LeNet5,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=LeNet5, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -210,8 +211,14 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("SnapshotEnsembleClassifier", training_time,
-                evaluating_time, testing_acc))
+records.append(
+    (
+        "SnapshotEnsembleClassifier",
+        training_time,
+        evaluating_time,
+        testing_acc,
+    )
+)
 
 # Print results on different ensemble methods
 display_records(records, logger)

examples/regression_YearPredictionMSD_mlp.py

Lines changed: 30 additions & 25 deletions
@@ -102,9 +102,7 @@ def forward(self, x):
 
 # FusionRegressor
 model = FusionRegressor(
-    estimator=MLP,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=MLP, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -120,14 +118,13 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("FusionRegressor", training_time, evaluating_time,
-                testing_mse))
+records.append(
+    ("FusionRegressor", training_time, evaluating_time, testing_mse)
+)
 
 # VotingRegressor
 model = VotingRegressor(
-    estimator=MLP,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=MLP, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -143,14 +140,13 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("VotingRegressor", training_time, evaluating_time,
-                testing_mse))
+records.append(
+    ("VotingRegressor", training_time, evaluating_time, testing_mse)
+)
 
 # BaggingRegressor
 model = BaggingRegressor(
-    estimator=MLP,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=MLP, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -166,14 +162,13 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("BaggingRegressor", training_time, evaluating_time,
-                testing_mse))
+records.append(
+    ("BaggingRegressor", training_time, evaluating_time, testing_mse)
+)
 
 # GradientBoostingRegressor
 model = GradientBoostingRegressor(
-    estimator=MLP,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=MLP, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -189,14 +184,18 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("GradientBoostingRegressor", training_time,
-                evaluating_time, testing_mse))
+records.append(
+    (
+        "GradientBoostingRegressor",
+        training_time,
+        evaluating_time,
+        testing_mse,
+    )
+)
 
 # SnapshotEnsembleRegressor
 model = SnapshotEnsembleRegressor(
-    estimator=MLP,
-    n_estimators=n_estimators,
-    cuda=True
+    estimator=MLP, n_estimators=n_estimators, cuda=True
 )
 
 # Set the optimizer
@@ -212,8 +211,14 @@ def forward(self, x):
 toc = time.time()
 evaluating_time = toc - tic
 
-records.append(("SnapshotEnsembleRegressor", training_time,
-                evaluating_time, testing_acc))
+records.append(
+    (
+        "SnapshotEnsembleRegressor",
+        training_time,
+        evaluating_time,
+        testing_acc,
+    )
+)
 
 # Print results on different ensemble methods
 display_records(records, logger)

examples/snapshot_ensemble_cifar10_resnet18.py

Lines changed: 40 additions & 26 deletions
@@ -16,18 +16,30 @@ class BasicBlock(nn.Module):
     def __init__(self, in_planes, planes, stride=1):
         super(BasicBlock, self).__init__()
         self.conv1 = nn.Conv2d(
-            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+            in_planes,
+            planes,
+            kernel_size=3,
+            stride=stride,
+            padding=1,
+            bias=False,
+        )
         self.bn1 = nn.BatchNorm2d(planes)
-        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
-                               stride=1, padding=1, bias=False)
+        self.conv2 = nn.Conv2d(
+            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
+        )
         self.bn2 = nn.BatchNorm2d(planes)
 
         self.shortcut = nn.Sequential()
-        if stride != 1 or in_planes != self.expansion*planes:
+        if stride != 1 or in_planes != self.expansion * planes:
             self.shortcut = nn.Sequential(
-                nn.Conv2d(in_planes, self.expansion*planes,
-                          kernel_size=1, stride=stride, bias=False),
-                nn.BatchNorm2d(self.expansion*planes)
+                nn.Conv2d(
+                    in_planes,
+                    self.expansion * planes,
+                    kernel_size=1,
+                    stride=stride,
+                    bias=False,
+                ),
+                nn.BatchNorm2d(self.expansion * planes),
             )
 
     def forward(self, x):
@@ -43,17 +55,18 @@ def __init__(self, block, num_blocks, num_classes=10):
         super(ResNet, self).__init__()
         self.in_planes = 64
 
-        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
-                               stride=1, padding=1, bias=False)
+        self.conv1 = nn.Conv2d(
+            3, 64, kernel_size=3, stride=1, padding=1, bias=False
+        )
         self.bn1 = nn.BatchNorm2d(64)
         self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
         self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
         self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
         self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
-        self.linear = nn.Linear(512*block.expansion, num_classes)
+        self.linear = nn.Linear(512 * block.expansion, num_classes)
 
     def _make_layer(self, block, planes, num_blocks, stride):
-        strides = [stride] + [1]*(num_blocks-1)
+        strides = [stride] + [1] * (num_blocks - 1)
         layers = []
         for stride in strides:
             layers.append(block(self.in_planes, planes, stride))
@@ -93,29 +106,33 @@ def forward(self, x):
         transforms.RandomHorizontalFlip(),
         transforms.RandomCrop(32, 4),
         transforms.ToTensor(),
-        transforms.Normalize((0.4914, 0.4822, 0.4465),
-                             (0.2023, 0.1994, 0.2010)),
+        transforms.Normalize(
+            (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
+        ),
     ]
 )
 
 test_transformer = transforms.Compose(
     [
         transforms.ToTensor(),
-        transforms.Normalize((0.4914, 0.4822, 0.4465),
-                             (0.2023, 0.1994, 0.2010)),
+        transforms.Normalize(
+            (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
+        ),
     ]
 )
 
 train_loader = DataLoader(
-    datasets.CIFAR10(data_dir, train=True, download=True, transform=train_transformer),
+    datasets.CIFAR10(
+        data_dir, train=True, download=True, transform=train_transformer
+    ),
     batch_size=batch_size,
-    shuffle=True
+    shuffle=True,
 )
 
 test_loader = DataLoader(
     datasets.CIFAR10(data_dir, train=False, transform=test_transformer),
     batch_size=batch_size,
-    shuffle=True
+    shuffle=True,
 )
 
 # Set the Logger
@@ -126,16 +143,13 @@ def forward(self, x):
     estimator=ResNet,
     estimator_args={"block": BasicBlock, "num_blocks": [2, 2, 2, 2]},
     n_estimators=n_estimators,
-    cuda=True
+    cuda=True,
 )
 
 # Set the Optimizer
-model.set_optimizer("SGD",
-                    lr=lr,
-                    weight_decay=weight_decay,
-                    momentum=momentum)
+model.set_optimizer(
+    "SGD", lr=lr, weight_decay=weight_decay, momentum=momentum
+)
 
 # Train and Evaluate
-model.fit(train_loader,
-          epochs=epochs,
-          test_loader=test_loader)
+model.fit(train_loader, epochs=epochs, test_loader=test_loader)
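
Every example touched by this commit follows the same train-and-evaluate loop that these hunks reformat. A condensed sketch of that loop, reusing LeNet5 and the CIFAR-10 loaders defined in classification_cifar10_cnn.py above; the model.evaluate call and the hyperparameter values here are assumptions for illustration, not part of the diff:

import time

from torchensemble import VotingClassifier

# LeNet5, train_loader, and test_loader as defined in
# examples/classification_cifar10_cnn.py above.
records = []
model = VotingClassifier(estimator=LeNet5, n_estimators=5, cuda=True)
model.set_optimizer("SGD", lr=1e-1, weight_decay=5e-4, momentum=0.9)

tic = time.time()
model.fit(train_loader, epochs=100, test_loader=test_loader)
training_time = time.time() - tic

tic = time.time()
testing_acc = model.evaluate(test_loader)  # assumed to return accuracy (%)
evaluating_time = time.time() - tic

records.append(
    ("VotingClassifier", training_time, evaluating_time, testing_acc)
)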
