Commit 0cf40ca

added name scope to layers
1 parent 5043e09 commit 0cf40ca

14 files changed: 368 additions & 68 deletions
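The init_name_scope decorator applied throughout this commit is defined elsewhere in the package (presumably on the layer Template/BaseModel classes) and is not expanded in the hunks below. A minimal sketch of what such a decorator could look like, assuming it simply mirrors the inline change made to Conv2D in conv.py: open a tf.name_scope named after the concrete class around __init__ and keep the scope on the instance.

import tensorflow as tf

class Template(object):

    @staticmethod
    def init_name_scope(init):
        # Hypothetical sketch: wrap a subclass's __init__ so that every op and
        # tf.Variable it creates is grouped under a scope named after the class.
        def wrapper(self, *args, **kwargs):
            with tf.name_scope(self.__class__.__name__) as self.scope:
                init(self, *args, **kwargs)
        return wrapper

Subclasses then opt in per constructor, e.g. @Template.init_name_scope above __init__, exactly as the diffs below show.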

tensorgraph/layers/activation.py

Lines changed: 2 additions & 0 deletions
@@ -13,6 +13,8 @@ def _train_fprop(self, state_below):
 
 
 class LeakyRELU(Template):
+
+    @Template.init_name_scope
     def __init__(self, leak=0.2):
         self.leak = leak
 

tensorgraph/layers/backbones.py

Lines changed: 34 additions & 24 deletions
@@ -35,11 +35,13 @@ def _test_fprop(self, state_below):
 
 
 class VGG16(BaseModel):
-    '''
-    REFERENCE: Very Deep Convolutional Networks for Large-Scale Image Recognition
-               (https://arxiv.org/abs/1409.1556)
-    '''
+
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape):
+        '''
+        REFERENCE: Very Deep Convolutional Networks for Large-Scale Image Recognition
+                   (https://arxiv.org/abs/1409.1556)
+        '''
         layers = []
         # block 1
         layers.append(Conv2D(input_channels, num_filters=64, kernel_size=(3,3), stride=(1,1), padding='SAME'))
@@ -123,11 +125,13 @@ def __init__(self, input_channels, input_shape):
 
 
 class VGG19(BaseModel):
-    '''
-    REFERENCE: Very Deep Convolutional Networks for Large-Scale Image Recognition
-               (https://arxiv.org/abs/1409.1556)
-    '''
+
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape):
+        '''
+        REFERENCE: Very Deep Convolutional Networks for Large-Scale Image Recognition
+                   (https://arxiv.org/abs/1409.1556)
+        '''
         layers = []
         # block 1
         layers.append(Conv2D(input_channels, num_filters=64, kernel_size=(3,3), stride=(1,1), padding='SAME'))
@@ -223,12 +227,13 @@ def __init__(self, input_channels, input_shape):
 
 
 class ResNetBase(BaseModel):
-    '''
-    REFERENCE: Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
-    '''
 
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape, config):
-        '''config (list of ints): a list of 4 number of layers for each identity block
+        '''
+        REFERENCE: Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
+        PARAMS:
+            config (list of ints): a list of 4 number of layers for each identity block
         '''
 
         layers = []
@@ -276,12 +281,13 @@ def __init__(self, input_channels, input_shape, config):
 
 
 class ResNetSmall(ResNetBase):
-    '''
-    REFERENCE: Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
-    '''
 
+    @ResNetBase.init_name_scope
     def __init__(self, input_channels, input_shape, config):
-        '''config (list of ints): a list of 2 number of layers for each identity block
+        '''
+        REFERENCE: Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
+        PARAMS:
+            config (list of ints): a list of 2 number of layers for each identity block
         '''
         layers = []
         layers.append(Conv2D(input_channels, num_filters=64, kernel_size=(7,7), stride=(2,2), padding='SAME'))
@@ -329,10 +335,8 @@ def __init__(self, input_channels, input_shape):
 
 
 class ShortCutBlock(BaseModel):
-    '''
-    REFERENCE: Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
-    '''
 
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape, filters, kernel_size, stride):
         '''
         DESCRIPTION:
@@ -377,6 +381,8 @@ def __init__(self, input_channels, input_shape, filters, kernel_size, stride):
 
 
 class IdentityBlock(BaseModel):
+
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape, nlayers=2, filters=[32, 64]):
         '''
         DESCRIPTION:
@@ -421,6 +427,7 @@ def identity_layer(in_hn, shape):
 
 class DenseBlock(BaseModel):
 
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape, growth_rate, nlayers):
         '''
         DESCRIPTION:
@@ -455,10 +462,11 @@ def _conv_layer(in_hn, shape, in_channel):
 
 class TransitionLayer(BaseModel):
 
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape):
         '''
         DESCRIPTION:
-            The transition layer of dense net (Densely Connected Convolutional Networks https://arxiv.org/abs/1608.06993)
+            The transition layer of densenet (Densely Connected Convolutional Networks https://arxiv.org/abs/1608.06993)
         '''
         layers = []
         layers.append(Conv2D(input_channels, num_filters=input_channels, kernel_size=(1,1), stride=(1,1), padding='SAME'))
@@ -475,11 +483,11 @@ def __init__(self, input_channels, input_shape):
 
 
 class DenseNet(BaseModel):
-    '''
-    REFERENCE: Densely Connected Convolutional Networks (https://arxiv.org/abs/1608.06993)
-    '''
+
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape, ndense=3, growth_rate=12, nlayer1blk=12):
         '''
+        REFERENCE: Densely Connected Convolutional Networks (https://arxiv.org/abs/1608.06993)
         PARAMS:
             ndense (int): number of dense blocks
             nlayer1blk (int): number of layers in one block, one layer refers to
@@ -504,7 +512,7 @@ def __init__(self, input_channels, input_shape, ndense=3, growth_rate=12, nlayer
 
             dense = DenseBlock(transit.output_channels, transit.output_shape, growth_rate, nlayer1blk)
             layers.append(dense)
-            layers.append(AvgPooling(poolsize=dense.output_shape, stride=(1,1), padding='VALID'))
+            # layers.append(AvgPooling(poolsize=dense.output_shape, stride=(1,1), padding='VALID'))
 
         assert np.prod(dense.output_shape) > 0, 'output shape {} is <= 0'.format(dense.output_shape)
         self.startnode = StartNode(input_vars=[None])
@@ -529,6 +537,8 @@ def __init__(self, input_channels, input_shape, ndense=3, growth_rate=12, nlayer
 
 
 class UNet(BaseModel):
+
+    @BaseModel.init_name_scope
     def __init__(self, input_channels, input_shape):
 
         def _encode_block(in_hn, shape, in_ch, out_ch):
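Because tf.name_scope calls nest, a model whose __init__ is wrapped by init_name_scope and which builds already-scoped layers (see the Conv2D change in conv.py below) ends up with hierarchical names in the graph. A small TF1-style illustration, assuming the decorator opens a scope named after the class as sketched above; it is standalone code, not tensorgraph API:

import tensorflow as tf

# Rough picture of what VGG16.__init__ building its first Conv2D now produces:
# the layer's scope nests inside the model's scope.
with tf.name_scope('VGG16'):
    with tf.name_scope('Conv2D'):
        filter = tf.Variable(tf.random_normal((3, 3, 3, 64), stddev=0.1),
                             name='Conv2D_filter')

print(filter.name)  # VGG16/Conv2D/Conv2D_filter:0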

tensorgraph/layers/conv.py

Lines changed: 33 additions & 22 deletions
@@ -5,6 +5,8 @@
 
 
 class MaxPooling(Template):
+
+    @Template.init_name_scope
     def __init__(self, poolsize=(2, 2), stride=(1,1), padding='VALID'):
         '''
         DESCRIPTION:
@@ -29,6 +31,8 @@ def _train_fprop(self, state_below):
 
 
 class MaxPooling3D(Template):
+
+    @Template.init_name_scope
     def __init__(self, poolsize=(2,2,2), stride=(1,1,1), padding='VALID'):
         '''
         DESCRIPTION:
@@ -54,6 +58,8 @@ def _train_fprop(self, state_below):
 
 
 class AvgPooling(Template):
+
+    @Template.init_name_scope
     def __init__(self, poolsize=(2, 2), stride=(1,1), padding='VALID'):
         '''
         DESCRIPTION:
@@ -78,6 +84,8 @@ def _train_fprop(self, state_below):
 
 
 class Conv2D(Template):
+
+    @Template.init_name_scope
     def __init__(self, input_channels, num_filters, kernel_size=(3,3), stride=(1,1),
                  filter=None, b=None, padding='VALID', stddev=0.1):
         '''
@@ -87,37 +95,38 @@ def __init__(self, input_channels, num_filters, kernel_size=(3,3), stride=(1,1),
             or
             "VALID" padding is always 0
         '''
-        self.input_channels = input_channels
-        self.num_filters = num_filters
-        self.kernel_size = kernel_size
-        self.stride = stride
-        self.padding = padding
-
-        self.filter_shape = self.kernel_size + (self.input_channels, self.num_filters)
-        self.filter = filter
-        if self.filter is None:
-            self.filter = tf.Variable(tf.random_normal(self.filter_shape, stddev=stddev),
-                                      name=self.__class__.__name__ + '_filter')
-
-        self.b = b
-        if self.b is None:
-            self.b = tf.Variable(tf.zeros([self.num_filters]), name=self.__class__.__name__ + '_b')
+        with tf.name_scope(self.__class__.__name__) as self.scope:
+            self.input_channels = input_channels
+            self.num_filters = num_filters
+            self.kernel_size = kernel_size
+            self.stride = stride
+            self.padding = padding
+
+            self.filter_shape = self.kernel_size + (self.input_channels, self.num_filters)
+            self.filter = filter
+            if self.filter is None:
+                self.filter = tf.Variable(tf.random_normal(self.filter_shape, stddev=stddev),
+                                          name=self.__class__.__name__ + '_filter')
+
+            self.b = b
+            if self.b is None:
+                self.b = tf.Variable(tf.zeros([self.num_filters]), name=self.__class__.__name__ + '_b')
 
     def _train_fprop(self, state_below):
-        '''
-        state_below: (b, h, w, c)
+        '''state_below: (b, h, w, c)
         '''
         conv_out = tf.nn.conv2d(state_below, self.filter, strides=(1,)+tuple(self.stride)+(1,),
-                                padding=self.padding, data_format='NHWC')
+                                padding=self.padding, data_format='NHWC')
         return tf.nn.bias_add(conv_out, self.b)
 
     @property
     def _variables(self):
         return [self.filter, self.b]
 
 
-
 class Depthwise_Conv2D(Template):
+
+    @Template.init_name_scope
     def __init__(self, input_channels, num_filters, kernel_size=(3,3), stride=(1,1),
                  filter=None, b=None, padding='VALID', stddev=0.1):
         '''
@@ -167,6 +176,7 @@ def _variables(self):
 
 class ZeroPad(Template):
 
+    @Template.init_name_scope
     def __init__(self, pad_along_height=[0,0], pad_along_width=[0,0]):
         '''
         PARAM:
@@ -182,16 +192,15 @@ def __init__(self, pad_along_height=[0,0], pad_along_width=[0,0]):
         assert isinstance(pad_along_width, (tuple, list)) and len(pad_along_width) == 2
         self.pad = [[0,0],pad_along_height, pad_along_width,[0,0]]
 
-
     def _train_fprop(self, state_below):
         '''state_below: (b, h, w, c)
         '''
         return tf.pad(state_below, self.pad)
 
 
-
-
 class Conv2D_Transpose(Template):
+
+    @Template.init_name_scope
     def __init__(self, input_channels, num_filters, output_shape, kernel_size=(3,3), stride=(1,1),
                  filter=None, b=None, padding='VALID', stddev=0.1):
         '''
@@ -237,6 +246,8 @@ def _variables(self):
 
 
 class Conv3D(Template):
+
+    @Template.init_name_scope
     def __init__(self, input_channels, num_filters, kernel_size=(3,3,3), stride=(1,1,1),
                  filter=None, b=None, padding='VALID', stddev=0.1):
         '''
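Conv2D is the one place in this commit where the scope is written out inline rather than via the decorator: parameter creation is wrapped in with tf.name_scope(self.__class__.__name__) as self.scope, so the filter and bias variables pick up a 'Conv2D/' prefix in the graph and the scope string stays available on the instance. A minimal TF1-style illustration of that effect (standalone code, not tensorgraph API):

import tensorflow as tf

with tf.name_scope('Conv2D') as scope:
    # tf.Variable (unlike tf.get_variable) respects the surrounding name scope,
    # so the variable name is prefixed with it.
    b = tf.Variable(tf.zeros([64]), name='Conv2D_b')

print(scope)   # 'Conv2D/'
print(b.name)  # 'Conv2D/Conv2D_b:0'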

tensorgraph/layers/linear.py

Lines changed: 3 additions & 0 deletions
@@ -4,6 +4,7 @@
 
 class Linear(Template):
 
+    @Template.init_name_scope
     def __init__(self, prev_dim=None, this_dim=None, W=None, b=None, stddev=0.1):
         """
         DESCRIPTION:
@@ -36,6 +37,7 @@ def _variables(self):
 
 class LinearMasked(Template):
 
+    @Template.init_name_scope
     def __init__(self, prev_dim=None, this_dim=None, W=None, b=None, mask=None, stddev=0.1):
         """
         DESCRIPTION:
@@ -72,6 +74,7 @@ def _variables(self):
 
 class SparseLinear(Template):
 
+    @Template.init_name_scope
     def __init__(self, prev_dim=None, this_dim=None, W=None, b=None, batchsize=None, stddev=0.1):
         """
         DESCRIPTION:

tensorgraph/layers/merge.py

Lines changed: 8 additions & 2 deletions
@@ -1,8 +1,8 @@
 
 import tensorflow as tf
+from .template import Template
 
-
-class Merge(object):
+class Merge(Template):
     '''
     Merge layer is used to merge the list of states from layer below into one state
     '''
@@ -25,6 +25,8 @@ def _variables(self):
 
 
 class Concat(Merge):
+
+    @Merge.init_name_scope
     def __init__(self, axis=1):
         self.axis = axis
 
@@ -56,6 +58,8 @@ def _train_fprop(self, state_list):
 
 
 class Select(Merge):
+
+    @Merge.init_name_scope
     def __init__(self, index=0):
         self.index = index
 
@@ -64,6 +68,8 @@ def _train_fprop(self, state_list):
 
 
 class SequenceMask(Merge):
+
+    @Merge.init_name_scope
     def __init__(self, maxlen):
         '''
        DESCRIPTION:
