Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def u_net(x, is_train=False, reuse=False, n_out=1, pad='SAME', activation=tf.nn.relu, depth=0.5):
    """Build a 4-level U-Net (TensorFlow 1.x graph mode, TensorLayer API).

    Encoder: four stages of [conv3x3, conv3x3, maxpool2x2, batchnorm].
    Bottleneck: two conv3x3 + batchnorm. Decoder: four stages of
    [deconv2x2, (batchnorm), concat with the matching encoder feature map,
    conv3x3, conv3x3], followed by a 1x1 conv to ``n_out`` channels.

    All convolutions are linear (``act=None``); the nonlinearity
    ``activation`` is applied only inside the BatchNorm layers.

    Parameters
    ----------
    x : tf.Tensor
        Rank-4 input batch (rank is asserted by the shape unpacking below).
    is_train : bool
        Train vs. inference mode for the BatchNorm layers.
    reuse : bool
        Reuse the variables of an existing ``"u_net"`` scope.
    n_out : int
        Number of channels of the final 1x1 convolution (default 1).
    pad : str
        Padding mode for every convolution; 'SAME' preserves spatial size.
    activation : callable
        Nonlinearity used by every BatchNorm layer.
    depth : float
        Width multiplier applied to every stage's channel count
        (e.g. 0.5 halves the classic 64/128/256/512/1024 widths).

    Returns
    -------
    tl.layers.Layer
        TensorLayer layer wrapping the (batch, h, w, n_out) logits tensor.
    """
    # Unpacking serves only as a rank-4 assertion; the sizes are unused.
    _, nx, ny, nz = x.get_shape().as_list()
    with tf.variable_scope("u_net", reuse=reuse):
        w_init = tf.truncated_normal_initializer(stddev=0.01)
        b_init = tf.constant_initializer(value=0.0)
        gamma_init = tf.random_normal_initializer(1., 0.02)
        tl.layers.set_name_reuse(reuse)

        inputs = InputLayer(x, name='inputs')

        # ---------------- Encoder ----------------
        conv1 = Conv2d(inputs, int(64 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d0a-b')
        conv1 = Conv2d(conv1, int(64 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d0b-c')
        pool1 = MaxPool2d(conv1, (2, 2), name='pool1')
        pool1 = BatchNormLayer(pool1, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn1')

        conv2 = Conv2d(pool1, int(128 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d1a-b')
        conv2 = Conv2d(conv2, int(128 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d1b-c')
        pool2 = MaxPool2d(conv2, (2, 2), name='pool2')
        pool2 = BatchNormLayer(pool2, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn2')

        conv3 = Conv2d(pool2, int(256 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d2a-b')
        conv3 = Conv2d(conv3, int(256 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d2b-c')
        pool3 = MaxPool2d(conv3, (2, 2), name='pool3')
        pool3 = BatchNormLayer(pool3, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn3')

        conv4 = Conv2d(pool3, int(512 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d3a-b')
        conv4 = Conv2d(conv4, int(512 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d3b-c')
        pool4 = MaxPool2d(conv4, (2, 2), name='pool4')
        pool4 = BatchNormLayer(pool4, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn4')

        # ---------------- Bottleneck ----------------
        conv5 = Conv2d(pool4, int(1024 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d4a-b')
        conv5 = Conv2d(conv5, int(1024 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv_d4b-c')
        conv5 = BatchNormLayer(conv5, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn4_1')

        # ---------------- Decoder ----------------
        # NOTE(review): unlike deconv3/deconv2/deconv1 below, deconv4 has no
        # BatchNorm (and hence no activation) before the concat. Preserved
        # as-is — confirm whether this asymmetry is intentional.
        up4 = DeConv2d(conv5, int(512 * depth), (3, 3), (2, 2), name='deconv4')
        up4 = ConcatLayer([up4, conv4], 3, name='concat4')
        conv4 = Conv2d(up4, int(512 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv4_1')
        conv4 = Conv2d(conv4, int(512 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv4_2')

        up3 = DeConv2d(conv4, int(256 * depth), (3, 3), (2, 2), name='deconv3')
        up3 = BatchNormLayer(up3, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn5')
        up3 = ConcatLayer([up3, conv3], 3, name='concat3')
        conv3 = Conv2d(up3, int(256 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv3_1')
        conv3 = Conv2d(conv3, int(256 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv3_2')

        up2 = DeConv2d(conv3, int(128 * depth), (3, 3), (2, 2), name='deconv2')
        up2 = BatchNormLayer(up2, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn6')
        up2 = ConcatLayer([up2, conv2], 3, name='concat2')
        conv2 = Conv2d(up2, int(128 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv2_1')
        conv2 = Conv2d(conv2, int(128 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv2_2')

        up1 = DeConv2d(conv2, int(64 * depth), (3, 3), (2, 2), name='deconv1')
        up1 = BatchNormLayer(up1, act=activation, is_train=is_train, gamma_init=gamma_init, name='bn7')
        up1 = ConcatLayer([up1, conv1], 3, name='concat1')
        conv1 = Conv2d(up1, int(64 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv1_1')
        conv1 = Conv2d(conv1, int(64 * depth), (3, 3), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv1_2')

        # NOTE(review): the output head's bias is re-initialized uniformly in
        # [0, n_out) instead of zeros — highly unusual for a logits layer.
        # Preserved as-is; confirm this is deliberate.
        b_init = tf.random_uniform_initializer(minval=0, maxval=n_out)
        conv1 = Conv2d(conv1, n_out, (1, 1), act=None, padding=pad, W_init=w_init, b_init=b_init, name='uconv1')
    return conv1
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement