import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.applications import EfficientNetB0, ResNet50, VGG16
from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, GlobalAveragePooling2D

# EfficientNet model
def build_efficientnet_model(num_classes):
    inputs = Input(shape=(224, 224, 3))
    # Keras EfficientNet bundles its own rescaling/normalization layers, so it
    # expects raw pixel values in [0, 255].
    base_model = EfficientNetB0(include_top=False, input_tensor=inputs, weights='imagenet')
    # Fine-tune the entire backbone rather than freezing the pretrained weights.
    base_model.trainable = True

    x = GlobalAveragePooling2D(name="avg_pool")(base_model.output)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.4, name="top_dropout")(x)
    outputs = Dense(num_classes, activation="softmax", name="pred")(x)

    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
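
# A minimal usage sketch (not part of the original file): `train_ds` and `val_ds`
# are assumed to be tf.data.Dataset objects yielding batches of (224, 224, 3)
# images with one-hot labels matching num_classes.
def train_classifier(build_fn, num_classes, train_ds, val_ds, epochs=5):
    # Build one of the classifiers defined in this file and fit it on the given data.
    model = build_fn(num_classes)
    history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)
    return model, history

# Example call (placeholder datasets):
# model, history = train_classifier(build_efficientnet_model, num_classes=10,
#                                   train_ds=train_ds, val_ds=val_ds)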

# ResNet model
def build_resnet_model(num_classes):
    inputs = Input(shape=(224, 224, 3))
    # Unlike EfficientNet, ResNet50 does not bundle preprocessing; inputs should be
    # passed through tf.keras.applications.resnet50.preprocess_input (see the
    # preprocessing sketch after this function).
    base_model = ResNet50(include_top=False, input_tensor=inputs, weights='imagenet')
    base_model.trainable = True

    x = GlobalAveragePooling2D(name="avg_pool")(base_model.output)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.4, name="top_dropout")(x)
    outputs = Dense(num_classes, activation="softmax", name="pred")(x)

    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
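
# ResNet50 (and VGG16 below) expect caffe-style preprocessed inputs rather than raw
# [0, 255] pixels. A minimal sketch (not part of the original file) of wiring the
# matching preprocessing into a tf.data pipeline; `raw_ds` is a hypothetical
# tf.data.Dataset of (image, label) pairs with pixel values in [0, 255].
def preprocess_for_resnet(dataset):
    # Apply the ResNet50-specific channel-wise preprocessing to every image.
    return dataset.map(
        lambda image, label: (tf.keras.applications.resnet50.preprocess_input(image), label))

# Example (placeholder dataset):
# train_ds = preprocess_for_resnet(raw_ds).batch(32)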

# "Vision Transformer" placeholder (currently a VGG16 stand-in, not a real ViT)
def build_vit_model(num_classes):
    # tf.keras.applications does not ship a Vision Transformer, so this function
    # substitutes a VGG16 backbone purely as a placeholder. Swap in a real ViT,
    # e.g. from a dedicated library or the minimal sketch below, before using it
    # as a transformer baseline.
    inputs = Input(shape=(224, 224, 3))
    base_model = VGG16(include_top=False, input_tensor=inputs, weights='imagenet')
    base_model.trainable = True

    x = Flatten()(base_model.output)
    x = Dropout(0.5)(x)
    outputs = Dense(num_classes, activation='softmax')(x)

    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
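
# The build_vit_model function above is only a VGG16 stand-in. Below is a minimal
# sketch (not part of the original file) of an actual Vision Transformer classifier
# built from plain Keras layers, assuming the same 224x224x3 inputs and softmax head.
# The hyperparameters (patch size, depth, width) are illustrative, not tuned; in
# practice a pretrained ViT from a dedicated library is usually preferable.
class PatchPositionEmbedding(layers.Layer):
    """Projects image patches to embeddings and adds learned positional embeddings."""

    def __init__(self, num_patches, embed_dim, patch_size, **kwargs):
        super().__init__(**kwargs)
        self.num_patches = num_patches
        # Non-overlapping patch extraction + linear projection in one strided conv.
        self.proj = layers.Conv2D(embed_dim, kernel_size=patch_size, strides=patch_size)
        self.flatten = layers.Reshape((num_patches, embed_dim))
        self.pos_embed = layers.Embedding(input_dim=num_patches, output_dim=embed_dim)

    def call(self, images):
        patches = self.flatten(self.proj(images))
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        return patches + self.pos_embed(positions)


def build_minimal_vit_model(num_classes, patch_size=16, embed_dim=128,
                            num_heads=4, num_layers=4, mlp_dim=256):
    inputs = Input(shape=(224, 224, 3))
    num_patches = (224 // patch_size) ** 2
    x = PatchPositionEmbedding(num_patches, embed_dim, patch_size)(inputs)

    # Standard pre-norm Transformer encoder blocks with residual connections.
    for _ in range(num_layers):
        attn_in = layers.LayerNormalization(epsilon=1e-6)(x)
        attn_out = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim // num_heads)(attn_in, attn_in)
        x = layers.Add()([x, attn_out])

        mlp_in = layers.LayerNormalization(epsilon=1e-6)(x)
        mlp_out = layers.Dense(mlp_dim, activation="gelu")(mlp_in)
        mlp_out = layers.Dense(embed_dim)(mlp_out)
        x = layers.Add()([x, mlp_out])

    # Pool the patch tokens (global average pooling instead of a CLS token) and classify.
    x = layers.LayerNormalization(epsilon=1e-6)(x)
    x = layers.GlobalAveragePooling1D()(x)
    x = layers.Dropout(0.4)(x)
    outputs = Dense(num_classes, activation="softmax", name="pred")(x)

    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model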