# layer.py
from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
import tensorflow as tf
import tensorflow.contrib.slim as slim

def concat2d(x1, x2):
    """Concatenate two NHWC tensors along the channel axis (no offset cropping).

    The static shapes must already agree in every dimension except the feature map depth.
    """
    x1_shape = x1.get_shape().as_list()
    x2_shape = x2.get_shape().as_list()
    if x1_shape[:-1] != x2_shape[:-1]:
        print("x1_shape: %s" % str(x1_shape))
        print("x2_shape: %s" % str(x2_shape))
        raise ValueError("Cannot concatenate tensors with different shapes, ignoring feature map depth")
    return tf.concat([x1, x2], 3)

def normalize(inp, activation, reuse=tf.AUTO_REUSE, scope='', form='batch_norm', is_training=True):
    """Apply the chosen normalization ('batch_norm', 'layer_norm', or 'None'), followed by the activation."""
    if form == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope, is_training=is_training)
    elif form == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif form == 'None':
        if activation is not None:
            return activation(inp)
        return inp
    else:
        raise ValueError("Unknown normalization form: %s" % form)
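
# Illustrative usage of `normalize` (hypothetical tensor `h`; not part of the
# original module): the `form` argument selects the normalization applied
# before the activation.
#   normalize(h, tf.nn.relu, scope='bn1', form='batch_norm')   # batch norm + ReLU
#   normalize(h, tf.nn.relu, scope='ln1', form='layer_norm')   # layer norm + ReLU
#   normalize(h, tf.nn.relu, form='None')                      # ReLU only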

def conv_block(inp, cweight, bweight, scope='', bn=True, is_training=True):
    """Perform a conv and bias add, optionally followed by batch norm and a ReLU nonlinearity."""
    conv = tf.nn.conv2d(inp, cweight, strides=[1, 1, 1, 1], padding='SAME')
    conv = tf.nn.bias_add(conv, bweight)
    if bn:
        return normalize(conv, tf.nn.relu, scope=scope, is_training=is_training)
    return conv

def deconv_block(inp, cweight, bweight, scope='', is_training=True):
    """Stride-2 transposed conv, bias add, batch norm, and ReLU.

    Doubles the spatial resolution and halves the feature map depth, so the
    static input shape must be fully defined.
    """
    x_shape = inp.get_shape().as_list()
    output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] // 2])
    deconv = tf.nn.conv2d_transpose(inp, cweight, output_shape, strides=[1, 2, 2, 1], padding='SAME')
    deconv = tf.nn.bias_add(deconv, bweight)
    return normalize(deconv, tf.nn.relu, scope=scope, is_training=is_training)
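
# Illustrative sketch of a U-Net style skip connection built from the helpers
# above (the placeholder tensors and weight shapes are hypothetical; they only
# show the expected NHWC shapes, not the project's actual variables):
#   low  = tf.placeholder(tf.float32, [4, 16, 16, 64])   # coarse decoder input
#   skip = tf.placeholder(tf.float32, [4, 32, 32, 32])   # matching encoder map
#   w_up = tf.get_variable('w_up', [2, 2, 32, 64])       # deconv filter: 64 -> 32 channels
#   b_up = tf.get_variable('b_up', [32])
#   up = deconv_block(low, w_up, b_up, scope='up1')      # [4, 32, 32, 32]
#   merged = concat2d(up, skip)                          # [4, 32, 32, 64]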

def max_pool(x, filter_height, filter_width, stride_y, stride_x, padding='SAME'):
    """Create a max pooling layer."""
    return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1], strides=[1, stride_y, stride_x, 1], padding=padding)

def lrn(x, radius, alpha, beta, bias=1.0):
    """Create a local response normalization layer."""
    return tf.nn.local_response_normalization(x, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)

def dropout(x, keep_prob):
    """Create a dropout layer."""
    return tf.nn.dropout(x, keep_prob)

def fc(x, wweight, bweight, activation=None):
    """Create a fully connected layer with an optional activation ('relu', 'leaky_relu', or None)."""
    act = tf.nn.xw_plus_b(x, wweight, bweight)
    if activation == 'relu':
        return tf.nn.relu(act)
    elif activation == 'leaky_relu':
        return tf.nn.leaky_relu(act)
    elif activation is None:
        return act
    else:
        raise NotImplementedError("Unsupported activation: %s" % activation)
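
# Minimal end-to-end sketch (assumes TF 1.x graph mode; the shapes and variable
# names below are hypothetical and only illustrate how these helpers compose).
if __name__ == "__main__":
    images = tf.placeholder(tf.float32, [8, 32, 32, 3])
    conv_w = tf.get_variable('conv_w', [3, 3, 3, 16])   # 3x3 conv, 3 -> 16 channels
    conv_b = tf.get_variable('conv_b', [16])
    fc_w = tf.get_variable('fc_w', [16 * 16 * 16, 10])  # flattened pooled map -> 10 logits
    fc_b = tf.get_variable('fc_b', [10])

    net = conv_block(images, conv_w, conv_b, scope='conv1')  # [8, 32, 32, 16]
    net = max_pool(net, 2, 2, 2, 2)                          # [8, 16, 16, 16]
    net = dropout(net, 0.5)
    net = tf.reshape(net, [8, -1])                           # flatten to [8, 4096]
    logits = fc(net, fc_w, fc_b, activation=None)            # [8, 10]
    print(logits.get_shape().as_list())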