When writing a paper we need a clear diagram of the network architecture, and a polished 3D rendering can add real value to the paper. An example of the generated image is shown in the figure.
For more options, see: 22 tools for designing and visualizing neural networks.
The concrete steps are as follows:
1. Download the PlotNeuralNet source code.
2. Follow the instructions in its README.md.
3. Write a Python script describing the architecture. For example:
```python
import sys
sys.path.append('../')
from pycore.tikzeng import *

# define your architecture
arch = [
    to_head('..'),
    to_cor(),
    to_begin(),

    # input images
    to_input("1.jpg", to="(-13,0,0)", width=16, height=16),
    to_input("2.jpg", to="(-7,0,0)", width=8, height=8),

    # conv1
    to_ConvConvRelu(name='conv1', s_filer=224, n_filer=(64, 64), offset="(0,0,0)", to="(0,0,0)",
                    width=(2, 2), height=120, depth=120, caption='ConvRelu1'),
    to_Pool("pool1", offset="(0,0,0)", to="(conv1-east)", height=90, depth=90, width=1, caption='Maxpool1'),

    # conv2
    to_ConvConvRelu("conv2", 112, (128, 128), offset="(6,0,0)", to="(pool1-east)",
                    height=100, depth=100, width=(4, 4), caption="ConvRelu2"),
    to_connection("pool1", "conv2"),
    to_Pool("pool2", offset="(0,0,0)", to="(conv2-east)", height=70, depth=70, width=1, caption='Maxpool2'),

    # conv3
    to_ConvConvRelu("conv6", 56, (256, ''), offset="(5,0,0)", to="(pool2-east)",
                    height=80, depth=80, width=(6, '')),
    to_connection("pool2", "conv6"),
    to_ConvConvRelu("conv7", 56, (256, ''), offset="(0,0,0)", to="(conv6-east)",
                    height=80, depth=80, width=(6, ''), caption="ConvRelu3"),
    to_ConvConvRelu("conv8", 56, (256, ''), offset="(0,0,0)", to="(conv7-east)",
                    height=80, depth=80, width=(6, '')),
    to_Pool("pool3", offset="(0,0,0)", to="(conv8-east)", height=50, depth=50, width=1, caption='Maxpool3'),

    # conv4
    to_ConvConvRelu("conv9", 28, (512, ''), offset="(4,0,0)", to="(pool3-east)",
                    height=60, depth=60, width=(8, '')),
    to_connection("pool3", "conv9"),
    to_ConvConvRelu("conv10", 28, (512, ''), offset="(0,0,0)", to="(conv9-east)",
                    height=60, depth=60, width=(8, ''), caption="ConvRelu4"),
    to_ConvConvRelu("conv11", 28, (512, ''), offset="(0,0,0)", to="(conv10-east)",
                    height=60, depth=60, width=(8, '')),
    to_Pool("pool4", offset="(0,0,0)", to="(conv11-east)", height=30, depth=30, width=1, caption='Maxpool4'),

    # conv5
    to_ConvConvRelu("conv12", 14, (512, ''), offset="(4,0,0)", to="(pool4-east)",
                    height=40, depth=40, width=(10, '')),
    to_connection("pool4", "conv12"),
    to_ConvConvRelu("conv13", 14, (512, ''), offset="(0,0,0)", to="(conv12-east)",
                    height=40, depth=40, width=(10, ''), caption="ConvRelu5"),
    to_ConvConvRelu("conv14", 14, (512, ''), offset="(0,0,0)", to="(conv13-east)",
                    height=40, depth=40, width=(10, '')),
    to_Pool("pool5", offset="(0,0,0)", to="(conv14-east)", height=20, depth=20, width=1, caption='Maxpool5'),

    # global average pooling
    to_Pool("pool6", offset="(3,0,0)", to="(pool5-east)", height=5, depth=5, width=1, caption="AvgPool"),
    to_connection("pool5", "pool6"),

    # fully connected layers
    to_SoftMax("soft1", 512, "(4,0,0)", "(pool6-east)", width=1.5, height=3, depth=40, caption="FC6"),
    to_connection("pool6", "soft1"),
    to_SoftMax("soft2", 5, "(4,0,0)", "(soft1-east)", width=1, height=2, depth=20, caption="FC7"),
    to_connection("soft1", "soft2"),

    to_end(),
]

# offset=(x,y,z) is the position relative to the previous layer.
# to_ConvConvRelu() usage notes:
#   n_filer=(512,512), width=(10,10)  -> draws two conv+relu blocks
#   n_filer=(512,512), width=(10,0.1) -> draws one conv+relu block whose relu band is 0.1 thick
#   n_filer=(512,''),  width=(10,'')  -> draws a single conv+relu block, relu band at default thickness

def main():
    namefile = str(sys.argv[0]).split('.')[0]
    to_generate(arch, namefile + '.tex')

if __name__ == '__main__':
    main()
```
The main calls are explained below.
```python
to_input("1.jpg", to="(-13,0,0)", width=16, height=16)
```
"1.jpg": the path of the input image.
to="(-13,0,0)": the (x, y, z) coordinate at which the image is placed.
width=16, height=16: the displayed width and height of the image.
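To see where this call sits in a complete file, here is a minimal sketch; it assumes the script lives under the repository's pyexamples directory so that pycore.tikzeng is importable, and input.jpg is a hypothetical image placed next to the script:

```python
import sys
sys.path.append('../')              # make the repository's pycore package importable
from pycore.tikzeng import *

# a minimal architecture: header commands, one input picture, closing command
arch = [
    to_head('..'),
    to_cor(),
    to_begin(),
    to_input("input.jpg", to="(-3,0,0)", width=8, height=8),  # hypothetical image file
    to_end(),
]

if __name__ == '__main__':
    to_generate(arch, "input_only.tex")   # writes the TikZ source to input_only.tex
```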
```python
to_ConvConvRelu(name='conv1', s_filer=224, n_filer=(64, 64), offset="(0,0,0)", to="(0,0,0)",
                width=(2, 2), height=120, depth=120, caption='ConvRelu1'),
to_ConvConvRelu("conv2", 112, (128, 128), offset="(6,0,0)", to="(pool1-east)",
                height=100, depth=100, width=(4, 4), caption="ConvRelu2"),
```
name='conv1': the layer name.
s_filer=224: the feature-map size annotated at this position.
n_filer=(64, 64): the number of convolution kernels, i.e. the number of output channels.
offset="(0,0,0)": the displacement from the previous layer along x, y and z.
to="(0,0,0)": the (x, y, z) coordinate (or anchor) the block is attached to.
offset="(6,0,0)", to="(pool1-east)": attach next to pool1, shifted 6 units along x.
width=(2, 2): the thickness of the drawn blocks.
height=120, depth=120: the height and depth of the drawn blocks.
caption='ConvRelu1': the caption shown under the block.
Notes on to_ConvConvRelu():
n_filer=(512, 512), width=(10, 10): draws two conv+relu blocks, relu band at the default thickness.
n_filer=(512, 512), width=(10, 0.1): draws one conv+relu block whose relu band is 0.1 thick.
n_filer=(512, ''), width=(10, ''): draws a single conv+relu block, relu band at the default thickness.
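Placed inside an arch list, the three variants look like this. This is a sketch under the same pyexamples assumption as before; the block names, positions and sizes are illustrative:

```python
import sys
sys.path.append('../')
from pycore.tikzeng import *

arch = [
    to_head('..'), to_cor(), to_begin(),
    # two conv blocks, each with a relu band at the default thickness
    to_ConvConvRelu("blockA", s_filer=56, n_filer=(512, 512), offset="(0,0,0)", to="(0,0,0)",
                    width=(10, 10), height=40, depth=40, caption="two blocks"),
    # one conv block followed by a thin (0.1) relu band
    to_ConvConvRelu("blockB", s_filer=56, n_filer=(512, 512), offset="(3,0,0)", to="(blockA-east)",
                    width=(10, 0.1), height=40, depth=40, caption="thin relu"),
    # a single conv block; the empty strings suppress the second label and width
    to_ConvConvRelu("blockC", s_filer=56, n_filer=(512, ''), offset="(3,0,0)", to="(blockB-east)",
                    width=(10, ''), height=40, depth=40, caption="single block"),
    to_end(),
]

if __name__ == '__main__':
    to_generate(arch, "convconvrelu_variants.tex")
```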
```python
to_Pool("pool1", offset="(0,0,0)", to="(conv1-east)", height=90, depth=90, width=1, caption='Maxpool1')
```
offset="(0,0,0)", to="(conv1-east)": attach directly to the east (right) face of conv1, with no gap.
```python
to_connection("pool4", "conv12"),
```
Draws an arrow from the pool4 block to the conv12 block.
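The arrow is only visible if the two blocks are actually separated, which is why the layers after each pooling stage in the script above use a non-zero x offset. A small self-contained sketch, under the same pyexamples assumption and with illustrative names and sizes:

```python
import sys
sys.path.append('../')
from pycore.tikzeng import *

arch = [
    to_head('..'), to_cor(), to_begin(),
    to_ConvConvRelu("conv_a", s_filer=56, n_filer=(256, ''), offset="(0,0,0)", to="(0,0,0)",
                    width=(6, ''), height=40, depth=40, caption="ConvRelu"),
    to_Pool("pool_a", offset="(0,0,0)", to="(conv_a-east)", height=30, depth=30, width=1, caption="Maxpool"),
    # leave a gap of 3 units so the arrow has somewhere to be drawn
    to_ConvConvRelu("conv_b", s_filer=28, n_filer=(512, ''), offset="(3,0,0)", to="(pool_a-east)",
                    width=(8, ''), height=30, depth=30, caption="ConvRelu"),
    to_connection("pool_a", "conv_b"),   # arrow from pool_a to conv_b
    to_end(),
]

if __name__ == '__main__':
    to_generate(arch, "connection_demo.tex")
```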
```python
to_SoftMax("soft1", 512, "(4,0,0)", "(pool6-east)", width=1.5, height=3, depth=40, caption="FC6"),
```
The arguments follow the same pattern as above: the positional values are the layer name, s_filer (the number annotated on the block, here 512), the offset, and the anchor to attach to; width, height and depth set the drawn size, and caption labels the block, which here represents a fully connected layer.
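For completeness, the classifier head on its own, as a compact sketch under the same assumptions (block sizes are illustrative):

```python
import sys
sys.path.append('../')
from pycore.tikzeng import *

arch = [
    to_head('..'), to_cor(), to_begin(),
    # positional arguments: name, s_filer, offset, anchor
    to_SoftMax("soft1", 512, "(0,0,0)", "(0,0,0)", width=1.5, height=3, depth=40, caption="FC6"),
    to_SoftMax("soft2", 5, "(4,0,0)", "(soft1-east)", width=1, height=2, depth=20, caption="FC7"),
    to_connection("soft1", "soft2"),   # arrow between the two fully connected blocks
    to_end(),
]

if __name__ == '__main__':
    to_generate(arch, "fc_head.tex")
```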