TensorFlow模型实现：UNet模型
2023-09-11 14:19:30
1. UNet模型
# -*-coding: utf-8 -*-
"""
@Project: triple_path_networks
@File : UNet.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2019-01-24 11:18:15
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
def lrelu(x):
    """Leaky ReLU with a fixed negative-side slope of 0.2."""
    return tf.maximum(0.2 * x, x)


# Activation applied to every conv layer built in this file.
activation_fn = lrelu
def UNet(inputs, reg, net_type='UNet_1X'):
    """Build a 5-level UNet encoder/decoder with tf.contrib.slim layers.

    Args:
        inputs: 4-D NHWC input tensor. The demo below feeds 3 channels;
            spatial dims go through four 2x2 poolings — assumes the
            decoder's explicit output shapes absorb odd sizes (they do,
            via tf.shape in upsample_and_concat).
        reg: slim weights regularizer applied to every conv layer.
        net_type: output head selector. 'UNet_1X' (default, matches the
            original hard-coded behavior) keeps the input's spatial size
            and emits 6 channels; 'UNet_3X' upscales 3x spatially via
            depth_to_space (27 = 3 channels * 3*3 block).

    Returns:
        The output tensor of the final 1x1 conv head.

    Raises:
        ValueError: if net_type is not a supported mode. (The original
            code left `out` unbound in that case, raising NameError.)
    """
    # Encoder: two 3x3 convs then a 2x2 max-pool per level; channel count
    # doubles each level (32 -> 512).
    conv1 = slim.conv2d(inputs, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv1_1', weights_regularizer=reg)
    conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv1_2', weights_regularizer=reg)
    pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')
    conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv2_1', weights_regularizer=reg)
    conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv2_2', weights_regularizer=reg)
    pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')
    conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv3_1', weights_regularizer=reg)
    conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv3_2', weights_regularizer=reg)
    pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')
    conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv4_1', weights_regularizer=reg)
    conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv4_2', weights_regularizer=reg)
    pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')
    # Bottleneck.
    conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv5_1', weights_regularizer=reg)
    conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv5_2', weights_regularizer=reg)
    # Decoder: learned 2x upsample, concat the mirrored encoder feature map
    # (skip connection), then two 3x3 convs; channels halve each level.
    up6 = upsample_and_concat(conv5, conv4, 256, 512)
    conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv6_1', weights_regularizer=reg)
    conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv6_2', weights_regularizer=reg)
    up7 = upsample_and_concat(conv6, conv3, 128, 256)
    conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv7_1', weights_regularizer=reg)
    conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv7_2', weights_regularizer=reg)
    up8 = upsample_and_concat(conv7, conv2, 64, 128)
    conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv8_1', weights_regularizer=reg)
    conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv8_2', weights_regularizer=reg)
    up9 = upsample_and_concat(conv8, conv1, 32, 64)
    conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv9_1', weights_regularizer=reg)
    conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=activation_fn, scope='g_conv9_2', weights_regularizer=reg)
    print("conv9.shape:{}".format(conv9.get_shape()))
    with tf.variable_scope(name_or_scope="output"):
        if net_type == 'UNet_3X':
            # 3x spatial upscale: 1x1 conv to 27 channels, then each
            # 3x3 channel block becomes a 3x3 spatial block.
            conv10 = slim.conv2d(conv9, 27, [1, 1], rate=1, activation_fn=None, scope='g_conv10', weights_regularizer=reg)
            out = tf.depth_to_space(conv10, 3)
        elif net_type == 'UNet_1X':
            # Output spatially matches the input; 6 channels.
            out = slim.conv2d(conv9, 6, [1, 1], rate=1, activation_fn=None, scope='g_conv10', weights_regularizer=reg)
        else:
            raise ValueError("unsupported net_type: {!r}".format(net_type))
    return out
def upsample_and_concat(x1, x2, output_channels, in_channels):
    """Upsample `x1` 2x with a learned transpose conv and concat `x2`.

    Args:
        x1: decoder feature map to upsample (NHWC, `in_channels` channels).
        x2: encoder skip feature map; tf.shape(x2) is the explicit output
            shape of the transpose conv, so odd spatial sizes are handled.
        output_channels: channels produced by the transpose conv.
        in_channels: channel count of `x1`.

    Returns:
        Tensor of [upsampled x1, x2] concatenated on the channel axis,
        with static channel dim set to `output_channels * 2`.
    """
    stride = 2
    # NOTE: a fresh (unscoped) variable is created on every call, matching
    # the original behavior.
    filter_shape = [stride, stride, output_channels, in_channels]
    kernel = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.02))
    upsampled = tf.nn.conv2d_transpose(
        x1, kernel, tf.shape(x2), strides=[1, stride, stride, 1])
    merged = tf.concat([upsampled, x2], 3)
    # Only the channel dim is statically known after the dynamic-shape deconv.
    merged.set_shape([None, None, None, output_channels * 2])
    return merged
if __name__ == "__main__":
    # Smoke test: build the net on a dummy batch and run one forward pass.
    weight_decay = 0.001
    reg = slim.l2_regularizer(scale=weight_decay)
    inputs = tf.ones(shape=[4, 100, 200, 3])
    out = UNet(inputs, reg)
    print("net1.shape:{}".format(inputs.get_shape()))
    print("out.shape:{}".format(out.get_shape()))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Actually execute the graph — the original opened a session and
        # initialized variables but never evaluated anything.
        result = sess.run(out)
        print("evaluated out.shape:{}".format(result.shape))
相关文章
- Nagios 快速实现数据可视化的几种方式
- java实现 洛谷 P1018 乘积最大
- Java实现 蓝桥杯 算法提高 道路和航路
- Java实现拓扑排序
- BaseServlet优化Servlet,实现类似struts2的一些简单效果
- Hibernate+maven+eclipse 实现自动建表
- jQuery+PHP实现的砸金蛋中奖程序
- flex 操作xml 实现增删改查 .
- 分布式锁的实现
- Java GUI编程:swing实现上传tiff文件至hdfs功能
- CRM Fiori my note应用的后台ABAP实现
- 【彩票】彩票预测算法(一):离散型马尔可夫链模型C#实现
- ML:分类预测任务中模型评估指标(ER/混淆矩阵ACC、Precision、Recall、AP、mAP、F1、ROC-AUC)简介、使用方法、代码实现、案例应用之详细攻略
- ML之LIME:基于boston波士顿房价数据集回归预测利用LIME/SP-LIME局部解释图/权重图结合RF随机森林模型实现模型事后解释案例之详细攻略
- TF之Transformer:基于tensorflow和Keras框架(特征编码+Tokenizer处理文本+保存模型)针对UCI新闻数据集利用Transformer算法实现新闻文本多分类案例
- ML之ME/LF:基于不同机器学习框架(sklearn/TF)下算法的模型评估指标(损失函数)代码实现及其函数(Scoring/metrics)代码实现(仅代码)
- 基于弹性配电网划分模型研究【IEEE33节点】(Matlab代码实现)
- 基于双向LSTM模型进行电力需求预测(Matlab代码实现)
- 用于灵敏性分析的方法模型(Matlab代码实现)
- 基于蒙特卡洛法的规模化电动车有序充放电及负荷预测(Python&Matlab实现)
- 深度学习入门,keras实现回归模型
- 如何基于MindSpore实现万亿级参数模型算法?
- Python实现贝叶斯优化器(Bayes_opt)优化支持向量机回归模型(SVR算法)项目实战
- Python实现直方图梯度提升回归模型(HistGradientBoostingRegressor算法)并基于网格搜索进行优化同时绘制PDP依赖图项目实战
- Qt之实现屏保功能
- Python实现一个最简单的MapReduce编程模型WordCount
- AI模型设计必备:PyTorch与TensorFlow模型C++与python实现学习资料
- 【灵敏性】基于巴顿模型计算输入空间频率的对比敏感度值研究(matlab代码实现)
- 基于LEACH和HEED的WSN路由协议研究与改进(Matlab代码实现)
- 战胜疫情——简单的冠状病毒模型(Matlab代码实现)
- 使用MindSpore 2.0的API快速实现深度学习模型之数据变换