A Simple TensorFlow Implementation of a CNN (Convolutional Neural Network)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 15 21:49:08 2018
@author: luogan
"""
import tensorflow as tf
from sklearn.datasets import load_digits
import numpy as np
digits = load_digits()
X_data = digits.data.astype(np.float32)
Y_data = digits.target.astype(np.float32).reshape(-1,1)
print(X_data.shape)  # (1797, 64): 1797 samples of flattened 8x8 images
print(Y_data.shape)  # (1797, 1): integer class labels 0-9
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_data = scaler.fit_transform(X_data)
from sklearn.preprocessing import OneHotEncoder
Y = OneHotEncoder().fit_transform(Y_data).todense()  # one-hot encode the labels (10 classes)
X = X_data.reshape(-1,8,8,1)
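# After preprocessing: X_data is min-max scaled to [0, 1], Y is (1797, 10) one-hot,
# and X is (1797, 8, 8, 1) -- NHWC layout, 8x8 single-channel images for conv2d.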
batch_size = 8  # mini-batch gradient descent (MBGD) with a batch size of 8
def generatebatch(X, Y, n_examples, batch_size):
    for batch_i in range(n_examples // batch_size):
        start = batch_i * batch_size
        end = start + batch_size
        batch_xs = X[start:end]
        batch_ys = Y[start:end]
        yield batch_xs, batch_ys  # yield one mini-batch at a time
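# Illustrative usage: one epoch walks the data in order, 8 samples at a time:
#   for bx, by in generatebatch(X, Y, Y.shape[0], batch_size):
#       ...  # bx has shape (8, 8, 8, 1), by has shape (8, 10)
# The final n_examples % batch_size samples of each epoch are simply dropped.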
tf.reset_default_graph()
# Input layer: NHWC batches of 8x8 grayscale images and 10-class one-hot labels
tf_X = tf.placeholder(tf.float32, [None, 8, 8, 1])
tf_Y = tf.placeholder(tf.float32, [None, 10])
# Convolution layer + activation: ten 3x3x1 filters, stride 1, SAME padding
conv_filter_w1 = tf.Variable(tf.random_normal([3, 3, 1, 10]))
conv_filter_b1 = tf.Variable(tf.random_normal([10]))
relu_feature_maps1 = tf.nn.relu(
    tf.nn.conv2d(tf_X, conv_filter_w1, strides=[1, 1, 1, 1], padding='SAME') + conv_filter_b1)
# Pooling layer: 3x3 window, stride 2 -> the 8x8 feature maps become 4x4
max_pool1 = tf.nn.max_pool(relu_feature_maps1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
print(max_pool1)  # Tensor of shape (?, 4, 4, 10)
# Second convolution, stride 2. Note that it takes relu_feature_maps1 (8x8) as input,
# not max_pool1, so max_pool1 above is printed but never used in the forward pass.
conv_filter_w2 = tf.Variable(tf.random_normal([3, 3, 10, 5]))
conv_filter_b2 = tf.Variable(tf.random_normal([5]))
conv_out2 = tf.nn.conv2d(relu_feature_maps1, conv_filter_w2, strides=[1, 2, 2, 1], padding='SAME') + conv_filter_b2
print(conv_out2)  # Tensor of shape (?, 4, 4, 5)
batch_mean, batch_var = tf.nn.moments(conv_out2, [0, 1, 2], keep_dims=True)  # per-channel moments over N, H, W
shift = tf.Variable(tf.zeros([5]))  # learnable batch-norm offset (beta)
scale = tf.Variable(tf.ones([5]))   # learnable batch-norm scale (gamma)
epsilon = 1e-3
BN_out = tf.nn.batch_normalization(conv_out2, batch_mean, batch_var, shift, scale, epsilon)
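# tf.nn.batch_normalization computes, per channel:
#   scale * (conv_out2 - batch_mean) / sqrt(batch_var + epsilon) + shift
# i.e. standardize with the batch statistics, then apply the learnable scale/offset.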
print(BN_out)
relu_BN_maps2 = tf.nn.relu(BN_out)
max_pool2 = tf.nn.max_pool(relu_BN_maps2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
print(max_pool2)  # Tensor of shape (?, 2, 2, 5)
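# Shape bookkeeping for the flatten below: conv2 ran with stride 2 on the 8x8 maps
# (giving 4x4), and max_pool2 halved that again to 2x2 with 5 channels -> 2*2*5 = 20.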
max_pool2_flat = tf.reshape(max_pool2, [-1, 2*2*5])
# Fully connected layer
fc_w1 = tf.Variable(tf.random_normal([2*2*5, 50]))
fc_b1 = tf.Variable(tf.random_normal([50]))
fc_out1 = tf.nn.relu(tf.matmul(max_pool2_flat, fc_w1) + fc_b1)
# Output layer: softmax over 10 classes
out_w1 = tf.Variable(tf.random_normal([50, 10]))
out_b1 = tf.Variable(tf.random_normal([10]))
pred = tf.nn.softmax(tf.matmul(fc_out1, out_w1) + out_b1)
# Cross-entropy loss; predictions are clipped away from 0 so tf.log never sees log(0)
loss = -tf.reduce_mean(tf_Y * tf.log(tf.clip_by_value(pred, 1e-11, 1.0)))
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)
y_pred = tf.argmax(pred, 1)  # tf.arg_max is deprecated; tf.argmax is the current spelling
bool_pred = tf.equal(tf.argmax(tf_Y, 1), y_pred)
accuracy = tf.reduce_mean(tf.cast(bool_pred, tf.float32))  # accuracy
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(1000):  # train for 1000 epochs
        # one pass of mini-batch gradient descent per epoch
        for batch_xs, batch_ys in generatebatch(X, Y, Y.shape[0], batch_size):
            sess.run(train_step, feed_dict={tf_X: batch_xs, tf_Y: batch_ys})
        if epoch % 100 == 0:
            res = sess.run(accuracy, feed_dict={tf_X: X, tf_Y: Y})
            print(epoch, res)
    res_ypred = y_pred.eval(feed_dict={tf_X: X, tf_Y: Y}).flatten()  # can only predict a batch of samples, not a single sample
    print(res_ypred)
from sklearn.metrics import accuracy_score
print(accuracy_score(Y_data, res_ypred.reshape(-1, 1)))  # overall accuracy on the training set
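The script above is written against the TensorFlow 1.x graph API. As a point of comparison only, here is a minimal sketch of the same architecture using tf.keras; it assumes TensorFlow 2.x, is not part of the original post, and makes its own substitutions (a BatchNormalization layer in place of the hand-rolled batch norm, and integer labels with sparse_categorical_crossentropy in place of the explicit one-hot encoding):

import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler

digits = load_digits()
X = MinMaxScaler().fit_transform(digits.data.astype(np.float32)).reshape(-1, 8, 8, 1)
y = digits.target  # integer labels 0-9; no one-hot encoding needed with a sparse loss

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(10, 3, padding='same', activation='relu', input_shape=(8, 8, 1)),
    tf.keras.layers.Conv2D(5, 3, strides=2, padding='same'),   # 8x8 -> 4x4
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.ReLU(),
    tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),  # 4x4 -> 2x2
    tf.keras.layers.Flatten(),                                  # 2*2*5 = 20 features
    tf.keras.layers.Dense(50, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, batch_size=8, epochs=100, verbose=0)
print(model.evaluate(X, y, verbose=0))

As in the original, training and evaluation both use the full dataset, which overstates generalization; a train/test split would be the usual fix.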