diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/.gitignore" "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/.gitignore"
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/.gitignore"
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/inspectionProfiles/profiles_settings.xml" "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/inspectionProfiles/profiles_settings.xml"
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/inspectionProfiles/profiles_settings.xml"
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/misc.xml" "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/misc.xml"
new file mode 100644
index 0000000..a6218fe
--- /dev/null
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/misc.xml"
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/modules.xml" "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/modules.xml"
new file mode 100644
index 0000000..763d09a
--- /dev/null
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/modules.xml"
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/猫狗识别.iml" filepath="$PROJECT_DIR$/.idea/猫狗识别.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/vcs.xml" "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/vcs.xml"
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/vcs.xml"
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/\347\214\253\347\213\227\350\257\206\345\210\253.iml" "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/\347\214\253\347\213\227\350\257\206\345\210\253.iml"
new file mode 100644
index 0000000..d0876a7
--- /dev/null
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/.idea/\347\214\253\347\213\227\350\257\206\345\210\253.iml"
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git "a/\347\214\253\347\213\227\350\257\206\345\210\253/model.py" "b/\347\214\253\347\213\227\350\257\206\345\210\253/model.py"
index 572472c..6393c0a 100644
--- "a/\347\214\253\347\213\227\350\257\206\345\210\253/model.py"
+++ "b/\347\214\253\347\213\227\350\257\206\345\210\253/model.py"
@@ -1,5 +1,5 @@
-#coding=utf-8
-import tensorflow as tf
+import tensorflow as tf
+from tensorflow.keras import layers, models
 # Architecture
 # conv1  convolution layer 1
 # pooling1_lrn  pooling layer 1
@@ -8,104 +8,59 @@
 # local3  fully connected layer 1
 # local4  fully connected layer 2
 # softmax  fully connected layer 3
-def inference(images, batch_size, n_classes):
-
- with tf.variable_scope('conv1') as scope:
-        # 3x3 convolution kernels over a 3-channel input image, producing 16 feature maps
- weights = tf.get_variable('weights',
- shape=[3, 3, 3, 16],
- dtype=tf.float32,
- initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
- biases = tf.get_variable('biases',
- shape=[16],
- dtype=tf.float32,
- initializer=tf.constant_initializer(0.1))
- conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
- pre_activation = tf.nn.bias_add(conv, biases)
- conv1 = tf.nn.relu(pre_activation, name=scope.name)
-
- with tf.variable_scope('pooling1_lrn') as scope:
- pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
- norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
-
- with tf.variable_scope('conv2') as scope:
- weights = tf.get_variable('weights',
- shape=[3, 3, 16, 16],
- dtype=tf.float32,
- initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
- biases = tf.get_variable('biases',
- shape=[16],
- dtype=tf.float32,
- initializer=tf.constant_initializer(0.1))
- conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
- pre_activation = tf.nn.bias_add(conv, biases)
- conv2 = tf.nn.relu(pre_activation, name='conv2')
-
- # pool2 and norm2
- with tf.variable_scope('pooling2_lrn') as scope:
- norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
- pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')
-
- with tf.variable_scope('local3') as scope:
- reshape = tf.reshape(pool2, shape=[batch_size, -1])
- dim = reshape.get_shape()[1].value
- weights = tf.get_variable('weights',
- shape=[dim, 128],
- dtype=tf.float32,
- initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
- biases = tf.get_variable('biases',
- shape=[128],
- dtype=tf.float32,
- initializer=tf.constant_initializer(0.1))
- local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
-
- # local4
- with tf.variable_scope('local4') as scope:
- weights = tf.get_variable('weights',
- shape=[128, 128],
- dtype=tf.float32,
- initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
- biases = tf.get_variable('biases',
- shape=[128],
- dtype=tf.float32,
- initializer=tf.constant_initializer(0.1))
- local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')
-
- # softmax
- with tf.variable_scope('softmax_linear') as scope:
- weights = tf.get_variable('softmax_linear',
- shape=[128, n_classes],
- dtype=tf.float32,
- initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
- biases = tf.get_variable('biases',
- shape=[n_classes],
- dtype=tf.float32,
- initializer=tf.constant_initializer(0.1))
- softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
-
- return softmax_linear
-
-
-
-def losses(logits, labels):
- with tf.variable_scope('loss') as scope:
- cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits \
- (logits=logits, labels=labels, name='xentropy_per_example')
- loss = tf.reduce_mean(cross_entropy, name='loss')
- tf.summary.scalar(scope.name + '/loss', loss)
- return loss
-
-def trainning(loss, learning_rate):
- with tf.name_scope('optimizer'):
- optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate)
- global_step = tf.Variable(0, name='global_step', trainable=False)
- train_op = optimizer.minimize(loss, global_step= global_step)
- return train_op
-
-def evaluation(logits, labels):
- with tf.variable_scope('accuracy') as scope:
- correct = tf.nn.in_top_k(logits, labels, 1)
- correct = tf.cast(correct, tf.float16)
- accuracy = tf.reduce_mean(correct)
- tf.summary.scalar(scope.name + '/accuracy', accuracy)
- return accuracy
\ No newline at end of file
+
+def inference(input_shape, n_classes):
+ model = models.Sequential()
+
+    # Changed: declare the input shape explicitly with an Input layer
+    model.add(layers.Input(shape=input_shape))
+
+    # conv1: first convolution layer, 3x3 kernels, 16 feature maps, ReLU activation
+ model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same', name='conv1'))
+
+    # pooling1_lrn: 3x3 max pooling with 2x2 strides, then batch normalization (replacing the original LRN)
+ model.add(layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='pooling1'))
+ model.add(layers.BatchNormalization(name='norm1'))
+
+    # conv2: second convolution layer, 3x3 kernels, 16 feature maps, ReLU activation
+ model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same', name='conv2'))
+
+    # pooling2_lrn: batch normalization (replacing LRN), then 3x3 max pooling with 1x1 strides
+ model.add(layers.BatchNormalization(name='norm2'))
+ model.add(layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='pooling2'))
+
+    # Flatten the feature maps into a vector for the fully connected layers
+ model.add(layers.Flatten())
+
+    # local3: first fully connected layer, 128 units, ReLU activation
+ model.add(layers.Dense(128, activation='relu', name='local3'))
+
+    # local4: second fully connected layer, 128 units, ReLU activation
+ model.add(layers.Dense(128, activation='relu', name='local4'))
+
+    # softmax: output layer with n_classes units and softmax activation
+ model.add(layers.Dense(n_classes, activation='softmax', name='softmax_linear'))
+
+ return model
+
+
+# Compute the model loss; SparseCategoricalCrossentropy is a multi-class loss for integer labels
+def losses(logits, labels):
+    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # the model's softmax output layer yields probabilities, not logits
+ loss = loss_fn(labels, logits)
+ return loss
+
+
+# Configure the model for training
+def trainning(model, loss, learning_rate):
+    # Use the Adam optimizer with the given learning rate
+ optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
+    # Compile with the optimizer, loss, and accuracy metric; note that loss must be a Keras loss function or its name, not a value precomputed by losses()
+ model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
+ return model
+
+
+# Evaluate the model's performance on a dataset
+def evaluation(model, images, labels):
+ loss, accuracy = model.evaluate(images, labels)
+ return accuracy