1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
|
# Train a small CNN binary image classifier with Keras.
#
# Builds a 4-conv-block network for 150x150 RGB images, trains it from
# directory-based generators (directory paths come from the project-local
# `conf` module), then saves the trained model and its training history
# under conf.data_dir.

import os
import pickle

from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator

import conf

# --- Model: four Conv/MaxPool blocks, then a dense head -----------------
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# Single sigmoid unit: outputs a binary-class probability.
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# --- Data pipelines: all images rescaled by 1./255 ----------------------
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    conf.train_dir,
    # All images will be resized to 150x150
    target_size=(150, 150),
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    conf.validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

# --- Train, then persist the model and its history ----------------------
hist = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)

model.save(os.path.join(conf.data_dir, 'model.h5'))
# Use a context manager so the history file is closed even on error
# (the original leaked the handle returned by open()).
with open(os.path.join(conf.data_dir, 'history.p'), 'wb') as f:
    pickle.dump(hist.history, f)
|