Skip to content

Commit da8ff62

Browse files
committed
Fix preprocess() returning unstandardized images (result was assigned to an unused `image` variable instead of `x`); hoist `epochs_drop` to a shared module-level constant used by the LR schedule; retune hyperparameters (num_gpus 1→2, bs_per_gpu 128→125, num_epochs 10→20); add a MirroredStrategy multi-GPU compile path in resnet_cifar.py and vgg_cifar.py
1 parent dc2b6af commit da8ff62

2 files changed

Lines changed: 42 additions & 17 deletions

File tree

‎resnet_cifar.py‎

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,12 +10,16 @@
1010
HEIGHT = 32
1111
WIDTH = 32
1212
NUM_CHANNELS = 3
13+
num_classes = 10
14+
15+
num_gpus = 2
16+
1317
INIT_LR = 1e-3
1418
num_train_samples = 40000
15-
bs_per_gpu = 128
16-
num_gpus = 1
17-
num_epochs = 10
18-
num_classes = 10
19+
bs_per_gpu = 125
20+
num_epochs = 20
21+
epochs_drop = 5.0
22+
1923

2024
class LRTensorBoard(TensorBoard):
2125
def __init__(self, log_dir, update_freq, histogram_freq): # add other arguments to __init__ if you need
@@ -29,7 +33,7 @@ def on_epoch_end(self, epoch, logs=None):
2933

3034

3135
def preprocess(x, y):
32-
image = tf.image.per_image_standardization(x)
36+
x = tf.image.per_image_standardization(x)
3337
return x, y
3438

3539

@@ -49,7 +53,7 @@ def augmentation(x, y):
4953
def schedule(epoch):
5054
initial_lrate = INIT_LR
5155
drop = 0.5
52-
epochs_drop = 2.0
56+
5357
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
5458
return lrate
5559

@@ -58,6 +62,7 @@ def schedule(epoch):
5862
x_val = x[num_train_samples:, :]
5963
y_val = y[num_train_samples:, :]
6064

65+
6166
x = x[:num_train_samples, :]
6267
y = y[:num_train_samples, :]
6368

@@ -77,6 +82,14 @@ def schedule(epoch):
7782
optimizer=keras.optimizers.Adam(learning_rate=INIT_LR),
7883
loss='sparse_categorical_crossentropy',
7984
metrics=['accuracy'])
85+
else:
86+
mirrored_strategy = tf.distribute.MirroredStrategy()
87+
with mirrored_strategy.scope():
88+
model = resnet.resnet56(classes=num_classes)
89+
model.compile(
90+
optimizer=keras.optimizers.Adam(learning_rate=INIT_LR),
91+
loss='sparse_categorical_crossentropy',
92+
metrics=['accuracy'])
8093

8194
log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
8295
tensorboard_callback = LRTensorBoard(

‎vgg_cifar.py‎

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,15 @@
1010
HEIGHT = 32
1111
WIDTH = 32
1212
NUM_CHANNELS = 3
13+
num_classes = 10
14+
15+
num_gpus = 2
16+
1317
INIT_LR = 1e-3
1418
num_train_samples = 40000
15-
bs_per_gpu = 128
16-
num_gpus = 1
17-
num_epochs = 10
19+
bs_per_gpu = 125
20+
num_epochs = 20
21+
epochs_drop = 5.0
1822

1923
class LRTensorBoard(TensorBoard):
2024
def __init__(self, log_dir, update_freq, histogram_freq): # add other arguments to __init__ if you need
@@ -28,7 +32,7 @@ def on_epoch_end(self, epoch, logs=None):
2832

2933

3034
def preprocess(x, y):
31-
image = tf.image.per_image_standardization(x)
35+
x = tf.image.per_image_standardization(x)
3236
return x, y
3337

3438

@@ -45,13 +49,13 @@ def augmentation(x, y):
4549
return x, y
4650

4751

48-
def schedule(epoch):
49-
initial_lrate = INIT_LR
50-
drop = 0.5
51-
epochs_drop = 2.0
52-
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
53-
return lrate
5452

53+
def schedule(epoch):
54+
initial_lrate = INIT_LR
55+
drop = 0.5
56+
57+
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
58+
return lrate
5559

5660
def VGG16(input_shape):
5761
# Do not use subclass for easier save/load model and print summary
@@ -183,7 +187,15 @@ def VGG16(input_shape):
183187
optimizer=keras.optimizers.Adam(learning_rate=INIT_LR),
184188
loss='sparse_categorical_crossentropy',
185189
metrics=['accuracy'])
186-
190+
else:
191+
mirrored_strategy = tf.distribute.MirroredStrategy()
192+
with mirrored_strategy.scope():
193+
model = VGG16([32, 32, 3])
194+
model.compile(
195+
optimizer=keras.optimizers.Adam(learning_rate=INIT_LR),
196+
loss='sparse_categorical_crossentropy',
197+
metrics=['accuracy'])
198+
187199
log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
188200
tensorboard_callback = LRTensorBoard(
189201
log_dir=log_dir,

0 commit comments

Comments
 (0)