Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- v0.0.1
- %660 = fn (%data: Tensor[(1, 3, 299, 299), float32]) -> Tensor[(1, 1000), float32] {
- %1 = fn (%p0: Tensor[(1, 1000), float32], __dict__=meta[StrMap][0]) -> Tensor[(1, 1000), float32] {
- %0 = nn.softmax(%p0)
- %0
- }
- %4 = fn (%p01: Tensor[(1, 2048), float32], %p1: Tensor[(1000, 2048), float32], %p2: Tensor[(1000,), float32], __dict__=meta[StrMap][1]) -> Tensor[(1, 1000), float32] {
- %2 = nn.dense(%p01, %p1, units=1000)
- %3 = nn.bias_add(%2, %p2)
- %3
- }
- %6 = fn (%p02: Tensor[(1, 2048, 1, 1), float32], __dict__=meta[StrMap][2]) -> Tensor[(1, 2048), float32] {
- %5 = nn.batch_flatten(%p02)
- %5
- }
- %8 = fn (%p03: Tensor[(1, 2048, 8, 8), float32], __dict__=meta[StrMap][3]) -> Tensor[(1, 2048, 1, 1), float32] {
- %7 = nn.avg_pool2d(%p03, pool_size=[8, 8], count_include_pad=True)
- %7
- }
- %11 = fn (%p04: Tensor[(1, 320, 8, 8), float32], %p11: Tensor[(1, 384, 8, 8), float32], %p21: Tensor[(1, 384, 8, 8), float32], %p3: Tensor[(1, 384, 8, 8), float32], %p4: Tensor[(1, 384, 8, 8), float32], %p5: Tensor[(1, 192, 8, 8), float32], __dict__=meta[StrMap][4]) -> Tensor[(1, 2048, 8, 8), float32] {
- %9 = (%p04, %p11, %p21, %p3, %p4, %p5)
- %10 = concatenate(%9, axis=1)
- %10
- }
- %16 = fn (%p05: Tensor[(1, 2048, 8, 8), float32], %p12: Tensor[(320, 2048, 1, 1), float32], %p22: Tensor[(320, 1, 1), float32], %p31: Tensor[(320, 1, 1), float32], __dict__=meta[StrMap][5]) -> Tensor[(1, 320, 8, 8), float32] {
- %12 = nn.conv2d(%p05, %p12, channels=320, kernel_size=[1, 1])
- %13 = multiply(%12, %p22)
- %14 = add(%13, %p31)
- %15 = nn.relu(%14)
- %15
- }
- %19 = fn (%p06: Tensor[(1, 320, 8, 8), float32], %p13: Tensor[(1, 384, 8, 8), float32], %p23: Tensor[(1, 384, 8, 8), float32], %p32: Tensor[(1, 384, 8, 8), float32], %p41: Tensor[(1, 384, 8, 8), float32], %p51: Tensor[(1, 192, 8, 8), float32], __dict__=meta[StrMap][6]) -> Tensor[(1, 2048, 8, 8), float32] {
- %17 = (%p06, %p13, %p23, %p32, %p41, %p51)
- %18 = concatenate(%17, axis=1)
- %18
- }
- %24 = fn (%p07: Tensor[(1, 1280, 8, 8), float32], %p14: Tensor[(320, 1280, 1, 1), float32], %p24: Tensor[(320, 1, 1), float32], %p33: Tensor[(320, 1, 1), float32], __dict__=meta[StrMap][7]) -> Tensor[(1, 320, 8, 8), float32] {
- %20 = nn.conv2d(%p07, %p14, channels=320, kernel_size=[1, 1])
- %21 = multiply(%20, %p24)
- %22 = add(%21, %p33)
- %23 = nn.relu(%22)
- %23
- }
- %27 = fn (%p08: Tensor[(1, 320, 8, 8), float32], %p15: Tensor[(1, 192, 8, 8), float32], %p25: Tensor[(1, 768, 8, 8), float32], __dict__=meta[StrMap][8]) -> Tensor[(1, 1280, 8, 8), float32] {
- %25 = (%p08, %p15, %p25)
- %26 = concatenate(%25, axis=1)
- %26
- }
- %32 = fn (%p09: Tensor[(1, 192, 17, 17), float32], %p16: Tensor[(320, 192, 3, 3), float32], %p26: Tensor[(320, 1, 1), float32], %p34: Tensor[(320, 1, 1), float32], __dict__=meta[StrMap][9]) -> Tensor[(1, 320, 8, 8), float32] {
- %28 = nn.conv2d(%p09, %p16, strides=[2, 2], channels=320, kernel_size=[3, 3])
- %29 = multiply(%28, %p26)
- %30 = add(%29, %p34)
- %31 = nn.relu(%30)
- %31
- }
- %37 = fn (%p010: Tensor[(1, 768, 17, 17), float32], %p17: Tensor[(192, 768, 1, 1), float32], %p27: Tensor[(192, 1, 1), float32], %p35: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][10]) -> Tensor[(1, 192, 17, 17), float32] {
- %33 = nn.conv2d(%p010, %p17, channels=192, kernel_size=[1, 1])
- %34 = multiply(%33, %p27)
- %35 = add(%34, %p35)
- %36 = nn.relu(%35)
- %36
- }
- %40 = fn (%p011: Tensor[(1, 192, 17, 17), float32], %p18: Tensor[(1, 192, 17, 17), float32], %p28: Tensor[(1, 192, 17, 17), float32], %p36: Tensor[(1, 192, 17, 17), float32], __dict__=meta[StrMap][11]) -> Tensor[(1, 768, 17, 17), float32] {
- %38 = (%p011, %p18, %p28, %p36)
- %39 = concatenate(%38, axis=1)
- %39
- }
- %45 = fn (%p012: Tensor[(1, 768, 17, 17), float32], %p19: Tensor[(192, 768, 1, 1), float32], %p29: Tensor[(192, 1, 1), float32], %p37: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][12]) -> Tensor[(1, 192, 17, 17), float32] {
- %41 = nn.conv2d(%p012, %p19, channels=192, kernel_size=[1, 1])
- %42 = multiply(%41, %p29)
- %43 = add(%42, %p37)
- %44 = nn.relu(%43)
- %44
- }
- %48 = fn (%p013: Tensor[(1, 192, 17, 17), float32], %p110: Tensor[(1, 192, 17, 17), float32], %p210: Tensor[(1, 192, 17, 17), float32], %p38: Tensor[(1, 192, 17, 17), float32], __dict__=meta[StrMap][13]) -> Tensor[(1, 768, 17, 17), float32] {
- %46 = (%p013, %p110, %p210, %p38)
- %47 = concatenate(%46, axis=1)
- %47
- }
- %53 = fn (%p014: Tensor[(1, 768, 17, 17), float32], %p111: Tensor[(192, 768, 1, 1), float32], %p211: Tensor[(192, 1, 1), float32], %p39: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][14]) -> Tensor[(1, 192, 17, 17), float32] {
- %49 = nn.conv2d(%p014, %p111, channels=192, kernel_size=[1, 1])
- %50 = multiply(%49, %p211)
- %51 = add(%50, %p39)
- %52 = nn.relu(%51)
- %52
- }
- %56 = fn (%p015: Tensor[(1, 192, 17, 17), float32], %p112: Tensor[(1, 192, 17, 17), float32], %p212: Tensor[(1, 192, 17, 17), float32], %p310: Tensor[(1, 192, 17, 17), float32], __dict__=meta[StrMap][15]) -> Tensor[(1, 768, 17, 17), float32] {
- %54 = (%p015, %p112, %p212, %p310)
- %55 = concatenate(%54, axis=1)
- %55
- }
- %61 = fn (%p016: Tensor[(1, 768, 17, 17), float32], %p113: Tensor[(192, 768, 1, 1), float32], %p213: Tensor[(192, 1, 1), float32], %p311: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][16]) -> Tensor[(1, 192, 17, 17), float32] {
- %57 = nn.conv2d(%p016, %p113, channels=192, kernel_size=[1, 1])
- %58 = multiply(%57, %p213)
- %59 = add(%58, %p311)
- %60 = nn.relu(%59)
- %60
- }
- %64 = fn (%p017: Tensor[(1, 192, 17, 17), float32], %p114: Tensor[(1, 192, 17, 17), float32], %p214: Tensor[(1, 192, 17, 17), float32], %p312: Tensor[(1, 192, 17, 17), float32], __dict__=meta[StrMap][17]) -> Tensor[(1, 768, 17, 17), float32] {
- %62 = (%p017, %p114, %p214, %p312)
- %63 = concatenate(%62, axis=1)
- %63
- }
- %69 = fn (%p018: Tensor[(1, 768, 17, 17), float32], %p115: Tensor[(192, 768, 1, 1), float32], %p215: Tensor[(192, 1, 1), float32], %p313: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][18]) -> Tensor[(1, 192, 17, 17), float32] {
- %65 = nn.conv2d(%p018, %p115, channels=192, kernel_size=[1, 1])
- %66 = multiply(%65, %p215)
- %67 = add(%66, %p313)
- %68 = nn.relu(%67)
- %68
- }
- %72 = fn (%p019: Tensor[(1, 384, 17, 17), float32], %p116: Tensor[(1, 96, 17, 17), float32], %p216: Tensor[(1, 288, 17, 17), float32], __dict__=meta[StrMap][19]) -> Tensor[(1, 768, 17, 17), float32] {
- %70 = (%p019, %p116, %p216)
- %71 = concatenate(%70, axis=1)
- %71
- }
- %77 = fn (%p020: Tensor[(1, 288, 35, 35), float32], %p117: Tensor[(384, 288, 3, 3), float32], %p217: Tensor[(384, 1, 1), float32], %p314: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][20]) -> Tensor[(1, 384, 17, 17), float32] {
- %73 = nn.conv2d(%p020, %p117, strides=[2, 2], channels=384, kernel_size=[3, 3])
- %74 = multiply(%73, %p217)
- %75 = add(%74, %p314)
- %76 = nn.relu(%75)
- %76
- }
- %80 = fn (%p021: Tensor[(1, 64, 35, 35), float32], %p118: Tensor[(1, 64, 35, 35), float32], %p218: Tensor[(1, 96, 35, 35), float32], %p315: Tensor[(1, 64, 35, 35), float32], __dict__=meta[StrMap][21]) -> Tensor[(1, 288, 35, 35), float32] {
- %78 = (%p021, %p118, %p218, %p315)
- %79 = concatenate(%78, axis=1)
- %79
- }
- %85 = fn (%p022: Tensor[(1, 288, 35, 35), float32], %p119: Tensor[(64, 288, 1, 1), float32], %p219: Tensor[(64, 1, 1), float32], %p316: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][22]) -> Tensor[(1, 64, 35, 35), float32] {
- %81 = nn.conv2d(%p022, %p119, channels=64, kernel_size=[1, 1])
- %82 = multiply(%81, %p219)
- %83 = add(%82, %p316)
- %84 = nn.relu(%83)
- %84
- }
- %88 = fn (%p023: Tensor[(1, 64, 35, 35), float32], %p120: Tensor[(1, 64, 35, 35), float32], %p220: Tensor[(1, 96, 35, 35), float32], %p317: Tensor[(1, 64, 35, 35), float32], __dict__=meta[StrMap][23]) -> Tensor[(1, 288, 35, 35), float32] {
- %86 = (%p023, %p120, %p220, %p317)
- %87 = concatenate(%86, axis=1)
- %87
- }
- %93 = fn (%p024: Tensor[(1, 256, 35, 35), float32], %p121: Tensor[(64, 256, 1, 1), float32], %p221: Tensor[(64, 1, 1), float32], %p318: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][24]) -> Tensor[(1, 64, 35, 35), float32] {
- %89 = nn.conv2d(%p024, %p121, channels=64, kernel_size=[1, 1])
- %90 = multiply(%89, %p221)
- %91 = add(%90, %p318)
- %92 = nn.relu(%91)
- %92
- }
- %96 = fn (%p025: Tensor[(1, 64, 35, 35), float32], %p122: Tensor[(1, 64, 35, 35), float32], %p222: Tensor[(1, 96, 35, 35), float32], %p319: Tensor[(1, 32, 35, 35), float32], __dict__=meta[StrMap][25]) -> Tensor[(1, 256, 35, 35), float32] {
- %94 = (%p025, %p122, %p222, %p319)
- %95 = concatenate(%94, axis=1)
- %95
- }
- %101 = fn (%p026: Tensor[(1, 192, 35, 35), float32], %p123: Tensor[(64, 192, 1, 1), float32], %p223: Tensor[(64, 1, 1), float32], %p320: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][26]) -> Tensor[(1, 64, 35, 35), float32] {
- %97 = nn.conv2d(%p026, %p123, channels=64, kernel_size=[1, 1])
- %98 = multiply(%97, %p223)
- %99 = add(%98, %p320)
- %100 = nn.relu(%99)
- %100
- }
- %103 = fn (%p027: Tensor[(1, 192, 71, 71), float32], __dict__=meta[StrMap][27]) -> Tensor[(1, 192, 35, 35), float32] {
- %102 = nn.max_pool2d(%p027, pool_size=[3, 3], strides=[2, 2])
- %102
- }
- %108 = fn (%p028: Tensor[(1, 80, 73, 73), float32], %p124: Tensor[(192, 80, 3, 3), float32], %p224: Tensor[(192, 1, 1), float32], %p321: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][28]) -> Tensor[(1, 192, 71, 71), float32] {
- %104 = nn.conv2d(%p028, %p124, channels=192, kernel_size=[3, 3])
- %105 = multiply(%104, %p224)
- %106 = add(%105, %p321)
- %107 = nn.relu(%106)
- %107
- }
- %113 = fn (%p029: Tensor[(1, 64, 73, 73), float32], %p125: Tensor[(80, 64, 1, 1), float32], %p225: Tensor[(80, 1, 1), float32], %p322: Tensor[(80, 1, 1), float32], __dict__=meta[StrMap][29]) -> Tensor[(1, 80, 73, 73), float32] {
- %109 = nn.conv2d(%p029, %p125, channels=80, kernel_size=[1, 1])
- %110 = multiply(%109, %p225)
- %111 = add(%110, %p322)
- %112 = nn.relu(%111)
- %112
- }
- %115 = fn (%p030: Tensor[(1, 64, 147, 147), float32], __dict__=meta[StrMap][30]) -> Tensor[(1, 64, 73, 73), float32] {
- %114 = nn.max_pool2d(%p030, pool_size=[3, 3], strides=[2, 2])
- %114
- }
- %120 = fn (%p031: Tensor[(1, 32, 147, 147), float32], %p126: Tensor[(64, 32, 3, 3), float32], %p226: Tensor[(64, 1, 1), float32], %p323: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][31]) -> Tensor[(1, 64, 147, 147), float32] {
- %116 = nn.conv2d(%p031, %p126, padding=[1, 1], channels=64, kernel_size=[3, 3])
- %117 = multiply(%116, %p226)
- %118 = add(%117, %p323)
- %119 = nn.relu(%118)
- %119
- }
- %125 = fn (%p032: Tensor[(1, 32, 149, 149), float32], %p127: Tensor[(32, 32, 3, 3), float32], %p227: Tensor[(32, 1, 1), float32], %p324: Tensor[(32, 1, 1), float32], __dict__=meta[StrMap][32]) -> Tensor[(1, 32, 147, 147), float32] {
- %121 = nn.conv2d(%p032, %p127, channels=32, kernel_size=[3, 3])
- %122 = multiply(%121, %p227)
- %123 = add(%122, %p324)
- %124 = nn.relu(%123)
- %124
- }
- %130 = fn (%p033: Tensor[(1, 3, 299, 299), float32], %p128: Tensor[(32, 3, 3, 3), float32], %p228: Tensor[(32, 1, 1), float32], %p325: Tensor[(32, 1, 1), float32], __dict__=meta[StrMap][33]) -> Tensor[(1, 32, 149, 149), float32] {
- %126 = nn.conv2d(%p033, %p128, strides=[2, 2], channels=32, kernel_size=[3, 3])
- %127 = multiply(%126, %p228)
- %128 = add(%127, %p325)
- %129 = nn.relu(%128)
- %129
- }
- %131 = %130(%data, meta[relay.Constant][0] // ty=Tensor[(32, 3, 3, 3), float32], meta[relay.Constant][1] // ty=Tensor[(32, 1, 1), float32], meta[relay.Constant][2] // ty=Tensor[(32, 1, 1), float32])
- %132 = %125(%131, meta[relay.Constant][3] // ty=Tensor[(32, 32, 3, 3), float32], meta[relay.Constant][4] // ty=Tensor[(32, 1, 1), float32], meta[relay.Constant][5] // ty=Tensor[(32, 1, 1), float32])
- %133 = %120(%132, meta[relay.Constant][6] // ty=Tensor[(64, 32, 3, 3), float32], meta[relay.Constant][7] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][8] // ty=Tensor[(64, 1, 1), float32])
- %134 = %115(%133)
- %135 = %113(%134, meta[relay.Constant][9] // ty=Tensor[(80, 64, 1, 1), float32], meta[relay.Constant][10] // ty=Tensor[(80, 1, 1), float32], meta[relay.Constant][11] // ty=Tensor[(80, 1, 1), float32])
- %136 = %108(%135, meta[relay.Constant][12] // ty=Tensor[(192, 80, 3, 3), float32], meta[relay.Constant][13] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][14] // ty=Tensor[(192, 1, 1), float32])
- %137 = %103(%136)
- %138 = %101(%137, meta[relay.Constant][15] // ty=Tensor[(64, 192, 1, 1), float32], meta[relay.Constant][16] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][17] // ty=Tensor[(64, 1, 1), float32])
- %143 = fn (%p034: Tensor[(1, 48, 35, 35), float32], %p129: Tensor[(64, 48, 5, 5), float32], %p229: Tensor[(64, 1, 1), float32], %p326: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][34]) -> Tensor[(1, 64, 35, 35), float32] {
- %139 = nn.conv2d(%p034, %p129, padding=[2, 2], channels=64, kernel_size=[5, 5])
- %140 = multiply(%139, %p229)
- %141 = add(%140, %p326)
- %142 = nn.relu(%141)
- %142
- }
- %148 = fn (%p035: Tensor[(1, 192, 35, 35), float32], %p130: Tensor[(48, 192, 1, 1), float32], %p230: Tensor[(48, 1, 1), float32], %p327: Tensor[(48, 1, 1), float32], __dict__=meta[StrMap][35]) -> Tensor[(1, 48, 35, 35), float32] {
- %144 = nn.conv2d(%p035, %p130, channels=48, kernel_size=[1, 1])
- %145 = multiply(%144, %p230)
- %146 = add(%145, %p327)
- %147 = nn.relu(%146)
- %147
- }
- %149 = %148(%137, meta[relay.Constant][18] // ty=Tensor[(48, 192, 1, 1), float32], meta[relay.Constant][19] // ty=Tensor[(48, 1, 1), float32], meta[relay.Constant][20] // ty=Tensor[(48, 1, 1), float32])
- %150 = %143(%149, meta[relay.Constant][21] // ty=Tensor[(64, 48, 5, 5), float32], meta[relay.Constant][22] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][23] // ty=Tensor[(64, 1, 1), float32])
- %155 = fn (%p036: Tensor[(1, 96, 35, 35), float32], %p131: Tensor[(96, 96, 3, 3), float32], %p231: Tensor[(96, 1, 1), float32], %p328: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][36]) -> Tensor[(1, 96, 35, 35), float32] {
- %151 = nn.conv2d(%p036, %p131, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %152 = multiply(%151, %p231)
- %153 = add(%152, %p328)
- %154 = nn.relu(%153)
- %154
- }
- %160 = fn (%p037: Tensor[(1, 64, 35, 35), float32], %p132: Tensor[(96, 64, 3, 3), float32], %p232: Tensor[(96, 1, 1), float32], %p329: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][37]) -> Tensor[(1, 96, 35, 35), float32] {
- %156 = nn.conv2d(%p037, %p132, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %157 = multiply(%156, %p232)
- %158 = add(%157, %p329)
- %159 = nn.relu(%158)
- %159
- }
- %165 = fn (%p038: Tensor[(1, 192, 35, 35), float32], %p133: Tensor[(64, 192, 1, 1), float32], %p233: Tensor[(64, 1, 1), float32], %p330: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][38]) -> Tensor[(1, 64, 35, 35), float32] {
- %161 = nn.conv2d(%p038, %p133, channels=64, kernel_size=[1, 1])
- %162 = multiply(%161, %p233)
- %163 = add(%162, %p330)
- %164 = nn.relu(%163)
- %164
- }
- %166 = %165(%137, meta[relay.Constant][24] // ty=Tensor[(64, 192, 1, 1), float32], meta[relay.Constant][25] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][26] // ty=Tensor[(64, 1, 1), float32])
- %167 = %160(%166, meta[relay.Constant][27] // ty=Tensor[(96, 64, 3, 3), float32], meta[relay.Constant][28] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][29] // ty=Tensor[(96, 1, 1), float32])
- %168 = %155(%167, meta[relay.Constant][30] // ty=Tensor[(96, 96, 3, 3), float32], meta[relay.Constant][31] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][32] // ty=Tensor[(96, 1, 1), float32])
- %173 = fn (%p039: Tensor[(1, 192, 35, 35), float32], %p134: Tensor[(32, 192, 1, 1), float32], %p234: Tensor[(32, 1, 1), float32], %p331: Tensor[(32, 1, 1), float32], __dict__=meta[StrMap][39]) -> Tensor[(1, 32, 35, 35), float32] {
- %169 = nn.conv2d(%p039, %p134, channels=32, kernel_size=[1, 1])
- %170 = multiply(%169, %p234)
- %171 = add(%170, %p331)
- %172 = nn.relu(%171)
- %172
- }
- %175 = fn (%p040: Tensor[(1, 192, 35, 35), float32], __dict__=meta[StrMap][40]) -> Tensor[(1, 192, 35, 35), float32] {
- %174 = nn.avg_pool2d(%p040, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %174
- }
- %176 = %175(%137)
- %177 = %173(%176, meta[relay.Constant][33] // ty=Tensor[(32, 192, 1, 1), float32], meta[relay.Constant][34] // ty=Tensor[(32, 1, 1), float32], meta[relay.Constant][35] // ty=Tensor[(32, 1, 1), float32])
- %178 = %96(%138, %150, %168, %177)
- %179 = %93(%178, meta[relay.Constant][36] // ty=Tensor[(64, 256, 1, 1), float32], meta[relay.Constant][37] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][38] // ty=Tensor[(64, 1, 1), float32])
- %184 = fn (%p041: Tensor[(1, 48, 35, 35), float32], %p135: Tensor[(64, 48, 5, 5), float32], %p235: Tensor[(64, 1, 1), float32], %p332: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][41]) -> Tensor[(1, 64, 35, 35), float32] {
- %180 = nn.conv2d(%p041, %p135, padding=[2, 2], channels=64, kernel_size=[5, 5])
- %181 = multiply(%180, %p235)
- %182 = add(%181, %p332)
- %183 = nn.relu(%182)
- %183
- }
- %189 = fn (%p042: Tensor[(1, 256, 35, 35), float32], %p136: Tensor[(48, 256, 1, 1), float32], %p236: Tensor[(48, 1, 1), float32], %p333: Tensor[(48, 1, 1), float32], __dict__=meta[StrMap][42]) -> Tensor[(1, 48, 35, 35), float32] {
- %185 = nn.conv2d(%p042, %p136, channels=48, kernel_size=[1, 1])
- %186 = multiply(%185, %p236)
- %187 = add(%186, %p333)
- %188 = nn.relu(%187)
- %188
- }
- %190 = %189(%178, meta[relay.Constant][39] // ty=Tensor[(48, 256, 1, 1), float32], meta[relay.Constant][40] // ty=Tensor[(48, 1, 1), float32], meta[relay.Constant][41] // ty=Tensor[(48, 1, 1), float32])
- %191 = %184(%190, meta[relay.Constant][42] // ty=Tensor[(64, 48, 5, 5), float32], meta[relay.Constant][43] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][44] // ty=Tensor[(64, 1, 1), float32])
- %196 = fn (%p043: Tensor[(1, 96, 35, 35), float32], %p137: Tensor[(96, 96, 3, 3), float32], %p237: Tensor[(96, 1, 1), float32], %p334: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][43]) -> Tensor[(1, 96, 35, 35), float32] {
- %192 = nn.conv2d(%p043, %p137, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %193 = multiply(%192, %p237)
- %194 = add(%193, %p334)
- %195 = nn.relu(%194)
- %195
- }
- %201 = fn (%p044: Tensor[(1, 64, 35, 35), float32], %p138: Tensor[(96, 64, 3, 3), float32], %p238: Tensor[(96, 1, 1), float32], %p335: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][44]) -> Tensor[(1, 96, 35, 35), float32] {
- %197 = nn.conv2d(%p044, %p138, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %198 = multiply(%197, %p238)
- %199 = add(%198, %p335)
- %200 = nn.relu(%199)
- %200
- }
- %206 = fn (%p045: Tensor[(1, 256, 35, 35), float32], %p139: Tensor[(64, 256, 1, 1), float32], %p239: Tensor[(64, 1, 1), float32], %p336: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][45]) -> Tensor[(1, 64, 35, 35), float32] {
- %202 = nn.conv2d(%p045, %p139, channels=64, kernel_size=[1, 1])
- %203 = multiply(%202, %p239)
- %204 = add(%203, %p336)
- %205 = nn.relu(%204)
- %205
- }
- %207 = %206(%178, meta[relay.Constant][45] // ty=Tensor[(64, 256, 1, 1), float32], meta[relay.Constant][46] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][47] // ty=Tensor[(64, 1, 1), float32])
- %208 = %201(%207, meta[relay.Constant][48] // ty=Tensor[(96, 64, 3, 3), float32], meta[relay.Constant][49] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][50] // ty=Tensor[(96, 1, 1), float32])
- %209 = %196(%208, meta[relay.Constant][51] // ty=Tensor[(96, 96, 3, 3), float32], meta[relay.Constant][52] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][53] // ty=Tensor[(96, 1, 1), float32])
- %214 = fn (%p046: Tensor[(1, 256, 35, 35), float32], %p140: Tensor[(64, 256, 1, 1), float32], %p240: Tensor[(64, 1, 1), float32], %p337: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][46]) -> Tensor[(1, 64, 35, 35), float32] {
- %210 = nn.conv2d(%p046, %p140, channels=64, kernel_size=[1, 1])
- %211 = multiply(%210, %p240)
- %212 = add(%211, %p337)
- %213 = nn.relu(%212)
- %213
- }
- %216 = fn (%p047: Tensor[(1, 256, 35, 35), float32], __dict__=meta[StrMap][47]) -> Tensor[(1, 256, 35, 35), float32] {
- %215 = nn.avg_pool2d(%p047, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %215
- }
- %217 = %216(%178)
- %218 = %214(%217, meta[relay.Constant][54] // ty=Tensor[(64, 256, 1, 1), float32], meta[relay.Constant][55] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][56] // ty=Tensor[(64, 1, 1), float32])
- %219 = %88(%179, %191, %209, %218)
- %220 = %85(%219, meta[relay.Constant][57] // ty=Tensor[(64, 288, 1, 1), float32], meta[relay.Constant][58] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][59] // ty=Tensor[(64, 1, 1), float32])
- %225 = fn (%p048: Tensor[(1, 48, 35, 35), float32], %p141: Tensor[(64, 48, 5, 5), float32], %p241: Tensor[(64, 1, 1), float32], %p338: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][48]) -> Tensor[(1, 64, 35, 35), float32] {
- %221 = nn.conv2d(%p048, %p141, padding=[2, 2], channels=64, kernel_size=[5, 5])
- %222 = multiply(%221, %p241)
- %223 = add(%222, %p338)
- %224 = nn.relu(%223)
- %224
- }
- %230 = fn (%p049: Tensor[(1, 288, 35, 35), float32], %p142: Tensor[(48, 288, 1, 1), float32], %p242: Tensor[(48, 1, 1), float32], %p339: Tensor[(48, 1, 1), float32], __dict__=meta[StrMap][49]) -> Tensor[(1, 48, 35, 35), float32] {
- %226 = nn.conv2d(%p049, %p142, channels=48, kernel_size=[1, 1])
- %227 = multiply(%226, %p242)
- %228 = add(%227, %p339)
- %229 = nn.relu(%228)
- %229
- }
- %231 = %230(%219, meta[relay.Constant][60] // ty=Tensor[(48, 288, 1, 1), float32], meta[relay.Constant][61] // ty=Tensor[(48, 1, 1), float32], meta[relay.Constant][62] // ty=Tensor[(48, 1, 1), float32])
- %232 = %225(%231, meta[relay.Constant][63] // ty=Tensor[(64, 48, 5, 5), float32], meta[relay.Constant][64] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][65] // ty=Tensor[(64, 1, 1), float32])
- %237 = fn (%p050: Tensor[(1, 96, 35, 35), float32], %p143: Tensor[(96, 96, 3, 3), float32], %p243: Tensor[(96, 1, 1), float32], %p340: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][50]) -> Tensor[(1, 96, 35, 35), float32] {
- %233 = nn.conv2d(%p050, %p143, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %234 = multiply(%233, %p243)
- %235 = add(%234, %p340)
- %236 = nn.relu(%235)
- %236
- }
- %242 = fn (%p051: Tensor[(1, 64, 35, 35), float32], %p144: Tensor[(96, 64, 3, 3), float32], %p244: Tensor[(96, 1, 1), float32], %p341: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][51]) -> Tensor[(1, 96, 35, 35), float32] {
- %238 = nn.conv2d(%p051, %p144, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %239 = multiply(%238, %p244)
- %240 = add(%239, %p341)
- %241 = nn.relu(%240)
- %241
- }
- %247 = fn (%p052: Tensor[(1, 288, 35, 35), float32], %p145: Tensor[(64, 288, 1, 1), float32], %p245: Tensor[(64, 1, 1), float32], %p342: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][52]) -> Tensor[(1, 64, 35, 35), float32] {
- %243 = nn.conv2d(%p052, %p145, channels=64, kernel_size=[1, 1])
- %244 = multiply(%243, %p245)
- %245 = add(%244, %p342)
- %246 = nn.relu(%245)
- %246
- }
- %248 = %247(%219, meta[relay.Constant][66] // ty=Tensor[(64, 288, 1, 1), float32], meta[relay.Constant][67] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][68] // ty=Tensor[(64, 1, 1), float32])
- %249 = %242(%248, meta[relay.Constant][69] // ty=Tensor[(96, 64, 3, 3), float32], meta[relay.Constant][70] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][71] // ty=Tensor[(96, 1, 1), float32])
- %250 = %237(%249, meta[relay.Constant][72] // ty=Tensor[(96, 96, 3, 3), float32], meta[relay.Constant][73] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][74] // ty=Tensor[(96, 1, 1), float32])
- %255 = fn (%p053: Tensor[(1, 288, 35, 35), float32], %p146: Tensor[(64, 288, 1, 1), float32], %p246: Tensor[(64, 1, 1), float32], %p343: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][53]) -> Tensor[(1, 64, 35, 35), float32] {
- %251 = nn.conv2d(%p053, %p146, channels=64, kernel_size=[1, 1])
- %252 = multiply(%251, %p246)
- %253 = add(%252, %p343)
- %254 = nn.relu(%253)
- %254
- }
- %257 = fn (%p054: Tensor[(1, 288, 35, 35), float32], __dict__=meta[StrMap][54]) -> Tensor[(1, 288, 35, 35), float32] {
- %256 = nn.avg_pool2d(%p054, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %256
- }
- %258 = %257(%219)
- %259 = %255(%258, meta[relay.Constant][75] // ty=Tensor[(64, 288, 1, 1), float32], meta[relay.Constant][76] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][77] // ty=Tensor[(64, 1, 1), float32])
- %260 = %80(%220, %232, %250, %259)
- %261 = %77(%260, meta[relay.Constant][78] // ty=Tensor[(384, 288, 3, 3), float32], meta[relay.Constant][79] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][80] // ty=Tensor[(384, 1, 1), float32])
- %266 = fn (%p055: Tensor[(1, 96, 35, 35), float32], %p147: Tensor[(96, 96, 3, 3), float32], %p247: Tensor[(96, 1, 1), float32], %p344: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][55]) -> Tensor[(1, 96, 17, 17), float32] {
- %262 = nn.conv2d(%p055, %p147, strides=[2, 2], channels=96, kernel_size=[3, 3])
- %263 = multiply(%262, %p247)
- %264 = add(%263, %p344)
- %265 = nn.relu(%264)
- %265
- }
- %271 = fn (%p056: Tensor[(1, 64, 35, 35), float32], %p148: Tensor[(96, 64, 3, 3), float32], %p248: Tensor[(96, 1, 1), float32], %p345: Tensor[(96, 1, 1), float32], __dict__=meta[StrMap][56]) -> Tensor[(1, 96, 35, 35), float32] {
- %267 = nn.conv2d(%p056, %p148, padding=[1, 1], channels=96, kernel_size=[3, 3])
- %268 = multiply(%267, %p248)
- %269 = add(%268, %p345)
- %270 = nn.relu(%269)
- %270
- }
- %276 = fn (%p057: Tensor[(1, 288, 35, 35), float32], %p149: Tensor[(64, 288, 1, 1), float32], %p249: Tensor[(64, 1, 1), float32], %p346: Tensor[(64, 1, 1), float32], __dict__=meta[StrMap][57]) -> Tensor[(1, 64, 35, 35), float32] {
- %272 = nn.conv2d(%p057, %p149, channels=64, kernel_size=[1, 1])
- %273 = multiply(%272, %p249)
- %274 = add(%273, %p346)
- %275 = nn.relu(%274)
- %275
- }
- %277 = %276(%260, meta[relay.Constant][81] // ty=Tensor[(64, 288, 1, 1), float32], meta[relay.Constant][82] // ty=Tensor[(64, 1, 1), float32], meta[relay.Constant][83] // ty=Tensor[(64, 1, 1), float32])
- %278 = %271(%277, meta[relay.Constant][84] // ty=Tensor[(96, 64, 3, 3), float32], meta[relay.Constant][85] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][86] // ty=Tensor[(96, 1, 1), float32])
- %279 = %266(%278, meta[relay.Constant][87] // ty=Tensor[(96, 96, 3, 3), float32], meta[relay.Constant][88] // ty=Tensor[(96, 1, 1), float32], meta[relay.Constant][89] // ty=Tensor[(96, 1, 1), float32])
- %281 = fn (%p058: Tensor[(1, 288, 35, 35), float32], __dict__=meta[StrMap][58]) -> Tensor[(1, 288, 17, 17), float32] {
- %280 = nn.max_pool2d(%p058, pool_size=[3, 3], strides=[2, 2])
- %280
- }
- %282 = %281(%260)
- %283 = %72(%261, %279, %282)
- %284 = %69(%283, meta[relay.Constant][90] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][91] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][92] // ty=Tensor[(192, 1, 1), float32])
- %289 = fn (%p059: Tensor[(1, 128, 17, 17), float32], %p150: Tensor[(192, 128, 7, 1), float32], %p250: Tensor[(192, 1, 1), float32], %p347: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][59]) -> Tensor[(1, 192, 17, 17), float32] {
- %285 = nn.conv2d(%p059, %p150, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %286 = multiply(%285, %p250)
- %287 = add(%286, %p347)
- %288 = nn.relu(%287)
- %288
- }
- %294 = fn (%p060: Tensor[(1, 128, 17, 17), float32], %p151: Tensor[(128, 128, 1, 7), float32], %p251: Tensor[(128, 1, 1), float32], %p348: Tensor[(128, 1, 1), float32], __dict__=meta[StrMap][60]) -> Tensor[(1, 128, 17, 17), float32] {
- %290 = nn.conv2d(%p060, %p151, padding=[0, 3], channels=128, kernel_size=[1, 7])
- %291 = multiply(%290, %p251)
- %292 = add(%291, %p348)
- %293 = nn.relu(%292)
- %293
- }
- %299 = fn (%p061: Tensor[(1, 768, 17, 17), float32], %p152: Tensor[(128, 768, 1, 1), float32], %p252: Tensor[(128, 1, 1), float32], %p349: Tensor[(128, 1, 1), float32], __dict__=meta[StrMap][61]) -> Tensor[(1, 128, 17, 17), float32] {
- %295 = nn.conv2d(%p061, %p152, channels=128, kernel_size=[1, 1])
- %296 = multiply(%295, %p252)
- %297 = add(%296, %p349)
- %298 = nn.relu(%297)
- %298
- }
- %300 = %299(%283, meta[relay.Constant][93] // ty=Tensor[(128, 768, 1, 1), float32], meta[relay.Constant][94] // ty=Tensor[(128, 1, 1), float32], meta[relay.Constant][95] // ty=Tensor[(128, 1, 1), float32])
- %301 = %294(%300, meta[relay.Constant][96] // ty=Tensor[(128, 128, 1, 7), float32], meta[relay.Constant][97] // ty=Tensor[(128, 1, 1), float32], meta[relay.Constant][98] // ty=Tensor[(128, 1, 1), float32])
- %302 = %289(%301, meta[relay.Constant][99] // ty=Tensor[(192, 128, 7, 1), float32], meta[relay.Constant][100] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][101] // ty=Tensor[(192, 1, 1), float32])
- %307 = fn (%p062: Tensor[(1, 128, 17, 17), float32], %p153: Tensor[(192, 128, 1, 7), float32], %p253: Tensor[(192, 1, 1), float32], %p350: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][62]) -> Tensor[(1, 192, 17, 17), float32] {
- %303 = nn.conv2d(%p062, %p153, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %304 = multiply(%303, %p253)
- %305 = add(%304, %p350)
- %306 = nn.relu(%305)
- %306
- }
- %312 = fn (%p063: Tensor[(1, 128, 17, 17), float32], %p154: Tensor[(128, 128, 7, 1), float32], %p254: Tensor[(128, 1, 1), float32], %p351: Tensor[(128, 1, 1), float32], __dict__=meta[StrMap][63]) -> Tensor[(1, 128, 17, 17), float32] {
- %308 = nn.conv2d(%p063, %p154, padding=[3, 0], channels=128, kernel_size=[7, 1])
- %309 = multiply(%308, %p254)
- %310 = add(%309, %p351)
- %311 = nn.relu(%310)
- %311
- }
- %317 = fn (%p064: Tensor[(1, 128, 17, 17), float32], %p155: Tensor[(128, 128, 1, 7), float32], %p255: Tensor[(128, 1, 1), float32], %p352: Tensor[(128, 1, 1), float32], __dict__=meta[StrMap][64]) -> Tensor[(1, 128, 17, 17), float32] {
- %313 = nn.conv2d(%p064, %p155, padding=[0, 3], channels=128, kernel_size=[1, 7])
- %314 = multiply(%313, %p255)
- %315 = add(%314, %p352)
- %316 = nn.relu(%315)
- %316
- }
- %322 = fn (%p065: Tensor[(1, 128, 17, 17), float32], %p156: Tensor[(128, 128, 7, 1), float32], %p256: Tensor[(128, 1, 1), float32], %p353: Tensor[(128, 1, 1), float32], __dict__=meta[StrMap][65]) -> Tensor[(1, 128, 17, 17), float32] {
- %318 = nn.conv2d(%p065, %p156, padding=[3, 0], channels=128, kernel_size=[7, 1])
- %319 = multiply(%318, %p256)
- %320 = add(%319, %p353)
- %321 = nn.relu(%320)
- %321
- }
- %327 = fn (%p066: Tensor[(1, 768, 17, 17), float32], %p157: Tensor[(128, 768, 1, 1), float32], %p257: Tensor[(128, 1, 1), float32], %p354: Tensor[(128, 1, 1), float32], __dict__=meta[StrMap][66]) -> Tensor[(1, 128, 17, 17), float32] {
- %323 = nn.conv2d(%p066, %p157, channels=128, kernel_size=[1, 1])
- %324 = multiply(%323, %p257)
- %325 = add(%324, %p354)
- %326 = nn.relu(%325)
- %326
- }
- %328 = %327(%283, meta[relay.Constant][102] // ty=Tensor[(128, 768, 1, 1), float32], meta[relay.Constant][103] // ty=Tensor[(128, 1, 1), float32], meta[relay.Constant][104] // ty=Tensor[(128, 1, 1), float32])
- %329 = %322(%328, meta[relay.Constant][105] // ty=Tensor[(128, 128, 7, 1), float32], meta[relay.Constant][106] // ty=Tensor[(128, 1, 1), float32], meta[relay.Constant][107] // ty=Tensor[(128, 1, 1), float32])
- %330 = %317(%329, meta[relay.Constant][108] // ty=Tensor[(128, 128, 1, 7), float32], meta[relay.Constant][109] // ty=Tensor[(128, 1, 1), float32], meta[relay.Constant][110] // ty=Tensor[(128, 1, 1), float32])
- %331 = %312(%330, meta[relay.Constant][111] // ty=Tensor[(128, 128, 7, 1), float32], meta[relay.Constant][112] // ty=Tensor[(128, 1, 1), float32], meta[relay.Constant][113] // ty=Tensor[(128, 1, 1), float32])
- %332 = %307(%331, meta[relay.Constant][114] // ty=Tensor[(192, 128, 1, 7), float32], meta[relay.Constant][115] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][116] // ty=Tensor[(192, 1, 1), float32])
- %337 = fn (%p067: Tensor[(1, 768, 17, 17), float32], %p158: Tensor[(192, 768, 1, 1), float32], %p258: Tensor[(192, 1, 1), float32], %p355: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][67]) -> Tensor[(1, 192, 17, 17), float32] {
- %333 = nn.conv2d(%p067, %p158, channels=192, kernel_size=[1, 1])
- %334 = multiply(%333, %p258)
- %335 = add(%334, %p355)
- %336 = nn.relu(%335)
- %336
- }
- // %339: pooling branch — 3x3 average pool with padding [1, 1] (size-preserving,
- // 17x17 in and out). count_include_pad=True means zero-padded border cells are
- // counted in the divisor, so edge averages are damped relative to the interior.
- %339 = fn (%p068: Tensor[(1, 768, 17, 17), float32], __dict__=meta[StrMap][68]) -> Tensor[(1, 768, 17, 17), float32] {
- %338 = nn.avg_pool2d(%p068, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %338
- }
- %340 = %339(%283)
- %341 = %337(%340, meta[relay.Constant][117] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][118] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][119] // ty=Tensor[(192, 1, 1), float32])
- %342 = %64(%284, %302, %332, %341)
- %343 = %61(%342, meta[relay.Constant][120] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][121] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][122] // ty=Tensor[(192, 1, 1), float32])
- %348 = fn (%p069: Tensor[(1, 160, 17, 17), float32], %p159: Tensor[(192, 160, 7, 1), float32], %p259: Tensor[(192, 1, 1), float32], %p356: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][69]) -> Tensor[(1, 192, 17, 17), float32] {
- %344 = nn.conv2d(%p069, %p159, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %345 = multiply(%344, %p259)
- %346 = add(%345, %p356)
- %347 = nn.relu(%346)
- %347
- }
- %353 = fn (%p070: Tensor[(1, 160, 17, 17), float32], %p160: Tensor[(160, 160, 1, 7), float32], %p260: Tensor[(160, 1, 1), float32], %p357: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][70]) -> Tensor[(1, 160, 17, 17), float32] {
- %349 = nn.conv2d(%p070, %p160, padding=[0, 3], channels=160, kernel_size=[1, 7])
- %350 = multiply(%349, %p260)
- %351 = add(%350, %p357)
- %352 = nn.relu(%351)
- %352
- }
- %358 = fn (%p071: Tensor[(1, 768, 17, 17), float32], %p161: Tensor[(160, 768, 1, 1), float32], %p261: Tensor[(160, 1, 1), float32], %p358: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][71]) -> Tensor[(1, 160, 17, 17), float32] {
- %354 = nn.conv2d(%p071, %p161, channels=160, kernel_size=[1, 1])
- %355 = multiply(%354, %p261)
- %356 = add(%355, %p358)
- %357 = nn.relu(%356)
- %357
- }
- %359 = %358(%342, meta[relay.Constant][123] // ty=Tensor[(160, 768, 1, 1), float32], meta[relay.Constant][124] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][125] // ty=Tensor[(160, 1, 1), float32])
- %360 = %353(%359, meta[relay.Constant][126] // ty=Tensor[(160, 160, 1, 7), float32], meta[relay.Constant][127] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][128] // ty=Tensor[(160, 1, 1), float32])
- %361 = %348(%360, meta[relay.Constant][129] // ty=Tensor[(192, 160, 7, 1), float32], meta[relay.Constant][130] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][131] // ty=Tensor[(192, 1, 1), float32])
- %366 = fn (%p072: Tensor[(1, 160, 17, 17), float32], %p162: Tensor[(192, 160, 1, 7), float32], %p262: Tensor[(192, 1, 1), float32], %p359: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][72]) -> Tensor[(1, 192, 17, 17), float32] {
- %362 = nn.conv2d(%p072, %p162, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %363 = multiply(%362, %p262)
- %364 = add(%363, %p359)
- %365 = nn.relu(%364)
- %365
- }
- %371 = fn (%p073: Tensor[(1, 160, 17, 17), float32], %p163: Tensor[(160, 160, 7, 1), float32], %p263: Tensor[(160, 1, 1), float32], %p360: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][73]) -> Tensor[(1, 160, 17, 17), float32] {
- %367 = nn.conv2d(%p073, %p163, padding=[3, 0], channels=160, kernel_size=[7, 1])
- %368 = multiply(%367, %p263)
- %369 = add(%368, %p360)
- %370 = nn.relu(%369)
- %370
- }
- %376 = fn (%p074: Tensor[(1, 160, 17, 17), float32], %p164: Tensor[(160, 160, 1, 7), float32], %p264: Tensor[(160, 1, 1), float32], %p361: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][74]) -> Tensor[(1, 160, 17, 17), float32] {
- %372 = nn.conv2d(%p074, %p164, padding=[0, 3], channels=160, kernel_size=[1, 7])
- %373 = multiply(%372, %p264)
- %374 = add(%373, %p361)
- %375 = nn.relu(%374)
- %375
- }
- %381 = fn (%p075: Tensor[(1, 160, 17, 17), float32], %p165: Tensor[(160, 160, 7, 1), float32], %p265: Tensor[(160, 1, 1), float32], %p362: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][75]) -> Tensor[(1, 160, 17, 17), float32] {
- %377 = nn.conv2d(%p075, %p165, padding=[3, 0], channels=160, kernel_size=[7, 1])
- %378 = multiply(%377, %p265)
- %379 = add(%378, %p362)
- %380 = nn.relu(%379)
- %380
- }
- %386 = fn (%p076: Tensor[(1, 768, 17, 17), float32], %p166: Tensor[(160, 768, 1, 1), float32], %p266: Tensor[(160, 1, 1), float32], %p363: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][76]) -> Tensor[(1, 160, 17, 17), float32] {
- %382 = nn.conv2d(%p076, %p166, channels=160, kernel_size=[1, 1])
- %383 = multiply(%382, %p266)
- %384 = add(%383, %p363)
- %385 = nn.relu(%384)
- %385
- }
- %387 = %386(%342, meta[relay.Constant][132] // ty=Tensor[(160, 768, 1, 1), float32], meta[relay.Constant][133] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][134] // ty=Tensor[(160, 1, 1), float32])
- %388 = %381(%387, meta[relay.Constant][135] // ty=Tensor[(160, 160, 7, 1), float32], meta[relay.Constant][136] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][137] // ty=Tensor[(160, 1, 1), float32])
- %389 = %376(%388, meta[relay.Constant][138] // ty=Tensor[(160, 160, 1, 7), float32], meta[relay.Constant][139] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][140] // ty=Tensor[(160, 1, 1), float32])
- %390 = %371(%389, meta[relay.Constant][141] // ty=Tensor[(160, 160, 7, 1), float32], meta[relay.Constant][142] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][143] // ty=Tensor[(160, 1, 1), float32])
- %391 = %366(%390, meta[relay.Constant][144] // ty=Tensor[(192, 160, 1, 7), float32], meta[relay.Constant][145] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][146] // ty=Tensor[(192, 1, 1), float32])
- %396 = fn (%p077: Tensor[(1, 768, 17, 17), float32], %p167: Tensor[(192, 768, 1, 1), float32], %p267: Tensor[(192, 1, 1), float32], %p364: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][77]) -> Tensor[(1, 192, 17, 17), float32] {
- %392 = nn.conv2d(%p077, %p167, channels=192, kernel_size=[1, 1])
- %393 = multiply(%392, %p267)
- %394 = add(%393, %p364)
- %395 = nn.relu(%394)
- %395
- }
- %398 = fn (%p078: Tensor[(1, 768, 17, 17), float32], __dict__=meta[StrMap][78]) -> Tensor[(1, 768, 17, 17), float32] {
- %397 = nn.avg_pool2d(%p078, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %397
- }
- %399 = %398(%342)
- %400 = %396(%399, meta[relay.Constant][147] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][148] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][149] // ty=Tensor[(192, 1, 1), float32])
- %401 = %56(%343, %361, %391, %400)
- %402 = %53(%401, meta[relay.Constant][150] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][151] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][152] // ty=Tensor[(192, 1, 1), float32])
- %407 = fn (%p079: Tensor[(1, 160, 17, 17), float32], %p168: Tensor[(192, 160, 7, 1), float32], %p268: Tensor[(192, 1, 1), float32], %p365: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][79]) -> Tensor[(1, 192, 17, 17), float32] {
- %403 = nn.conv2d(%p079, %p168, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %404 = multiply(%403, %p268)
- %405 = add(%404, %p365)
- %406 = nn.relu(%405)
- %406
- }
- %412 = fn (%p080: Tensor[(1, 160, 17, 17), float32], %p169: Tensor[(160, 160, 1, 7), float32], %p269: Tensor[(160, 1, 1), float32], %p366: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][80]) -> Tensor[(1, 160, 17, 17), float32] {
- %408 = nn.conv2d(%p080, %p169, padding=[0, 3], channels=160, kernel_size=[1, 7])
- %409 = multiply(%408, %p269)
- %410 = add(%409, %p366)
- %411 = nn.relu(%410)
- %411
- }
- %417 = fn (%p081: Tensor[(1, 768, 17, 17), float32], %p170: Tensor[(160, 768, 1, 1), float32], %p270: Tensor[(160, 1, 1), float32], %p367: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][81]) -> Tensor[(1, 160, 17, 17), float32] {
- %413 = nn.conv2d(%p081, %p170, channels=160, kernel_size=[1, 1])
- %414 = multiply(%413, %p270)
- %415 = add(%414, %p367)
- %416 = nn.relu(%415)
- %416
- }
- %418 = %417(%401, meta[relay.Constant][153] // ty=Tensor[(160, 768, 1, 1), float32], meta[relay.Constant][154] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][155] // ty=Tensor[(160, 1, 1), float32])
- %419 = %412(%418, meta[relay.Constant][156] // ty=Tensor[(160, 160, 1, 7), float32], meta[relay.Constant][157] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][158] // ty=Tensor[(160, 1, 1), float32])
- %420 = %407(%419, meta[relay.Constant][159] // ty=Tensor[(192, 160, 7, 1), float32], meta[relay.Constant][160] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][161] // ty=Tensor[(192, 1, 1), float32])
- %425 = fn (%p082: Tensor[(1, 160, 17, 17), float32], %p171: Tensor[(192, 160, 1, 7), float32], %p271: Tensor[(192, 1, 1), float32], %p368: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][82]) -> Tensor[(1, 192, 17, 17), float32] {
- %421 = nn.conv2d(%p082, %p171, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %422 = multiply(%421, %p271)
- %423 = add(%422, %p368)
- %424 = nn.relu(%423)
- %424
- }
- %430 = fn (%p083: Tensor[(1, 160, 17, 17), float32], %p172: Tensor[(160, 160, 7, 1), float32], %p272: Tensor[(160, 1, 1), float32], %p369: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][83]) -> Tensor[(1, 160, 17, 17), float32] {
- %426 = nn.conv2d(%p083, %p172, padding=[3, 0], channels=160, kernel_size=[7, 1])
- %427 = multiply(%426, %p272)
- %428 = add(%427, %p369)
- %429 = nn.relu(%428)
- %429
- }
- %435 = fn (%p084: Tensor[(1, 160, 17, 17), float32], %p173: Tensor[(160, 160, 1, 7), float32], %p273: Tensor[(160, 1, 1), float32], %p370: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][84]) -> Tensor[(1, 160, 17, 17), float32] {
- %431 = nn.conv2d(%p084, %p173, padding=[0, 3], channels=160, kernel_size=[1, 7])
- %432 = multiply(%431, %p273)
- %433 = add(%432, %p370)
- %434 = nn.relu(%433)
- %434
- }
- %440 = fn (%p085: Tensor[(1, 160, 17, 17), float32], %p174: Tensor[(160, 160, 7, 1), float32], %p274: Tensor[(160, 1, 1), float32], %p371: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][85]) -> Tensor[(1, 160, 17, 17), float32] {
- %436 = nn.conv2d(%p085, %p174, padding=[3, 0], channels=160, kernel_size=[7, 1])
- %437 = multiply(%436, %p274)
- %438 = add(%437, %p371)
- %439 = nn.relu(%438)
- %439
- }
- %445 = fn (%p086: Tensor[(1, 768, 17, 17), float32], %p175: Tensor[(160, 768, 1, 1), float32], %p275: Tensor[(160, 1, 1), float32], %p372: Tensor[(160, 1, 1), float32], __dict__=meta[StrMap][86]) -> Tensor[(1, 160, 17, 17), float32] {
- %441 = nn.conv2d(%p086, %p175, channels=160, kernel_size=[1, 1])
- %442 = multiply(%441, %p275)
- %443 = add(%442, %p372)
- %444 = nn.relu(%443)
- %444
- }
- %446 = %445(%401, meta[relay.Constant][162] // ty=Tensor[(160, 768, 1, 1), float32], meta[relay.Constant][163] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][164] // ty=Tensor[(160, 1, 1), float32])
- %447 = %440(%446, meta[relay.Constant][165] // ty=Tensor[(160, 160, 7, 1), float32], meta[relay.Constant][166] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][167] // ty=Tensor[(160, 1, 1), float32])
- %448 = %435(%447, meta[relay.Constant][168] // ty=Tensor[(160, 160, 1, 7), float32], meta[relay.Constant][169] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][170] // ty=Tensor[(160, 1, 1), float32])
- %449 = %430(%448, meta[relay.Constant][171] // ty=Tensor[(160, 160, 7, 1), float32], meta[relay.Constant][172] // ty=Tensor[(160, 1, 1), float32], meta[relay.Constant][173] // ty=Tensor[(160, 1, 1), float32])
- %450 = %425(%449, meta[relay.Constant][174] // ty=Tensor[(192, 160, 1, 7), float32], meta[relay.Constant][175] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][176] // ty=Tensor[(192, 1, 1), float32])
- %455 = fn (%p087: Tensor[(1, 768, 17, 17), float32], %p176: Tensor[(192, 768, 1, 1), float32], %p276: Tensor[(192, 1, 1), float32], %p373: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][87]) -> Tensor[(1, 192, 17, 17), float32] {
- %451 = nn.conv2d(%p087, %p176, channels=192, kernel_size=[1, 1])
- %452 = multiply(%451, %p276)
- %453 = add(%452, %p373)
- %454 = nn.relu(%453)
- %454
- }
- %457 = fn (%p088: Tensor[(1, 768, 17, 17), float32], __dict__=meta[StrMap][88]) -> Tensor[(1, 768, 17, 17), float32] {
- %456 = nn.avg_pool2d(%p088, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %456
- }
- %458 = %457(%401)
- %459 = %455(%458, meta[relay.Constant][177] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][178] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][179] // ty=Tensor[(192, 1, 1), float32])
- %460 = %48(%402, %420, %450, %459)
- %461 = %45(%460, meta[relay.Constant][180] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][181] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][182] // ty=Tensor[(192, 1, 1), float32])
- %466 = fn (%p089: Tensor[(1, 192, 17, 17), float32], %p177: Tensor[(192, 192, 7, 1), float32], %p277: Tensor[(192, 1, 1), float32], %p374: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][89]) -> Tensor[(1, 192, 17, 17), float32] {
- %462 = nn.conv2d(%p089, %p177, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %463 = multiply(%462, %p277)
- %464 = add(%463, %p374)
- %465 = nn.relu(%464)
- %465
- }
- %471 = fn (%p090: Tensor[(1, 192, 17, 17), float32], %p178: Tensor[(192, 192, 1, 7), float32], %p278: Tensor[(192, 1, 1), float32], %p375: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][90]) -> Tensor[(1, 192, 17, 17), float32] {
- %467 = nn.conv2d(%p090, %p178, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %468 = multiply(%467, %p278)
- %469 = add(%468, %p375)
- %470 = nn.relu(%469)
- %470
- }
- %476 = fn (%p091: Tensor[(1, 768, 17, 17), float32], %p179: Tensor[(192, 768, 1, 1), float32], %p279: Tensor[(192, 1, 1), float32], %p376: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][91]) -> Tensor[(1, 192, 17, 17), float32] {
- %472 = nn.conv2d(%p091, %p179, channels=192, kernel_size=[1, 1])
- %473 = multiply(%472, %p279)
- %474 = add(%473, %p376)
- %475 = nn.relu(%474)
- %475
- }
- %477 = %476(%460, meta[relay.Constant][183] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][184] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][185] // ty=Tensor[(192, 1, 1), float32])
- %478 = %471(%477, meta[relay.Constant][186] // ty=Tensor[(192, 192, 1, 7), float32], meta[relay.Constant][187] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][188] // ty=Tensor[(192, 1, 1), float32])
- %479 = %466(%478, meta[relay.Constant][189] // ty=Tensor[(192, 192, 7, 1), float32], meta[relay.Constant][190] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][191] // ty=Tensor[(192, 1, 1), float32])
- %484 = fn (%p092: Tensor[(1, 192, 17, 17), float32], %p180: Tensor[(192, 192, 1, 7), float32], %p280: Tensor[(192, 1, 1), float32], %p377: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][92]) -> Tensor[(1, 192, 17, 17), float32] {
- %480 = nn.conv2d(%p092, %p180, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %481 = multiply(%480, %p280)
- %482 = add(%481, %p377)
- %483 = nn.relu(%482)
- %483
- }
- %489 = fn (%p093: Tensor[(1, 192, 17, 17), float32], %p181: Tensor[(192, 192, 7, 1), float32], %p281: Tensor[(192, 1, 1), float32], %p378: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][93]) -> Tensor[(1, 192, 17, 17), float32] {
- %485 = nn.conv2d(%p093, %p181, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %486 = multiply(%485, %p281)
- %487 = add(%486, %p378)
- %488 = nn.relu(%487)
- %488
- }
- %494 = fn (%p094: Tensor[(1, 192, 17, 17), float32], %p182: Tensor[(192, 192, 1, 7), float32], %p282: Tensor[(192, 1, 1), float32], %p379: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][94]) -> Tensor[(1, 192, 17, 17), float32] {
- %490 = nn.conv2d(%p094, %p182, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %491 = multiply(%490, %p282)
- %492 = add(%491, %p379)
- %493 = nn.relu(%492)
- %493
- }
- %499 = fn (%p095: Tensor[(1, 192, 17, 17), float32], %p183: Tensor[(192, 192, 7, 1), float32], %p283: Tensor[(192, 1, 1), float32], %p380: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][95]) -> Tensor[(1, 192, 17, 17), float32] {
- %495 = nn.conv2d(%p095, %p183, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %496 = multiply(%495, %p283)
- %497 = add(%496, %p380)
- %498 = nn.relu(%497)
- %498
- }
- %504 = fn (%p096: Tensor[(1, 768, 17, 17), float32], %p184: Tensor[(192, 768, 1, 1), float32], %p284: Tensor[(192, 1, 1), float32], %p381: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][96]) -> Tensor[(1, 192, 17, 17), float32] {
- %500 = nn.conv2d(%p096, %p184, channels=192, kernel_size=[1, 1])
- %501 = multiply(%500, %p284)
- %502 = add(%501, %p381)
- %503 = nn.relu(%502)
- %503
- }
- %505 = %504(%460, meta[relay.Constant][192] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][193] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][194] // ty=Tensor[(192, 1, 1), float32])
- %506 = %499(%505, meta[relay.Constant][195] // ty=Tensor[(192, 192, 7, 1), float32], meta[relay.Constant][196] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][197] // ty=Tensor[(192, 1, 1), float32])
- %507 = %494(%506, meta[relay.Constant][198] // ty=Tensor[(192, 192, 1, 7), float32], meta[relay.Constant][199] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][200] // ty=Tensor[(192, 1, 1), float32])
- %508 = %489(%507, meta[relay.Constant][201] // ty=Tensor[(192, 192, 7, 1), float32], meta[relay.Constant][202] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][203] // ty=Tensor[(192, 1, 1), float32])
- %509 = %484(%508, meta[relay.Constant][204] // ty=Tensor[(192, 192, 1, 7), float32], meta[relay.Constant][205] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][206] // ty=Tensor[(192, 1, 1), float32])
- %514 = fn (%p097: Tensor[(1, 768, 17, 17), float32], %p185: Tensor[(192, 768, 1, 1), float32], %p285: Tensor[(192, 1, 1), float32], %p382: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][97]) -> Tensor[(1, 192, 17, 17), float32] {
- %510 = nn.conv2d(%p097, %p185, channels=192, kernel_size=[1, 1])
- %511 = multiply(%510, %p285)
- %512 = add(%511, %p382)
- %513 = nn.relu(%512)
- %513
- }
- %516 = fn (%p098: Tensor[(1, 768, 17, 17), float32], __dict__=meta[StrMap][98]) -> Tensor[(1, 768, 17, 17), float32] {
- %515 = nn.avg_pool2d(%p098, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %515
- }
- %517 = %516(%460)
- %518 = %514(%517, meta[relay.Constant][207] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][208] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][209] // ty=Tensor[(192, 1, 1), float32])
- %519 = %40(%461, %479, %509, %518)
- %520 = %37(%519, meta[relay.Constant][210] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][211] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][212] // ty=Tensor[(192, 1, 1), float32])
- %521 = %32(%520, meta[relay.Constant][213] // ty=Tensor[(320, 192, 3, 3), float32], meta[relay.Constant][214] // ty=Tensor[(320, 1, 1), float32], meta[relay.Constant][215] // ty=Tensor[(320, 1, 1), float32])
- // %526: downsampling conv — 3x3 conv with strides [2, 2] and no padding, taking the
- // 17x17 map to 8x8 (192 -> 192 channels), then per-channel multiply/add and ReLU.
- // NOTE(review): mul/add against (192, 1, 1) tensors appears to be folded batch-norm.
- %526 = fn (%p099: Tensor[(1, 192, 17, 17), float32], %p186: Tensor[(192, 192, 3, 3), float32], %p286: Tensor[(192, 1, 1), float32], %p383: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][99]) -> Tensor[(1, 192, 8, 8), float32] {
- %522 = nn.conv2d(%p099, %p186, strides=[2, 2], channels=192, kernel_size=[3, 3])
- %523 = multiply(%522, %p286)
- %524 = add(%523, %p383)
- %525 = nn.relu(%524)
- %525
- }
- %531 = fn (%p0100: Tensor[(1, 192, 17, 17), float32], %p187: Tensor[(192, 192, 7, 1), float32], %p287: Tensor[(192, 1, 1), float32], %p384: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][100]) -> Tensor[(1, 192, 17, 17), float32] {
- %527 = nn.conv2d(%p0100, %p187, padding=[3, 0], channels=192, kernel_size=[7, 1])
- %528 = multiply(%527, %p287)
- %529 = add(%528, %p384)
- %530 = nn.relu(%529)
- %530
- }
- %536 = fn (%p0101: Tensor[(1, 192, 17, 17), float32], %p188: Tensor[(192, 192, 1, 7), float32], %p288: Tensor[(192, 1, 1), float32], %p385: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][101]) -> Tensor[(1, 192, 17, 17), float32] {
- %532 = nn.conv2d(%p0101, %p188, padding=[0, 3], channels=192, kernel_size=[1, 7])
- %533 = multiply(%532, %p288)
- %534 = add(%533, %p385)
- %535 = nn.relu(%534)
- %535
- }
- %541 = fn (%p0102: Tensor[(1, 768, 17, 17), float32], %p189: Tensor[(192, 768, 1, 1), float32], %p289: Tensor[(192, 1, 1), float32], %p386: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][102]) -> Tensor[(1, 192, 17, 17), float32] {
- %537 = nn.conv2d(%p0102, %p189, channels=192, kernel_size=[1, 1])
- %538 = multiply(%537, %p289)
- %539 = add(%538, %p386)
- %540 = nn.relu(%539)
- %540
- }
- %542 = %541(%519, meta[relay.Constant][216] // ty=Tensor[(192, 768, 1, 1), float32], meta[relay.Constant][217] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][218] // ty=Tensor[(192, 1, 1), float32])
- %543 = %536(%542, meta[relay.Constant][219] // ty=Tensor[(192, 192, 1, 7), float32], meta[relay.Constant][220] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][221] // ty=Tensor[(192, 1, 1), float32])
- %544 = %531(%543, meta[relay.Constant][222] // ty=Tensor[(192, 192, 7, 1), float32], meta[relay.Constant][223] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][224] // ty=Tensor[(192, 1, 1), float32])
- %545 = %526(%544, meta[relay.Constant][225] // ty=Tensor[(192, 192, 3, 3), float32], meta[relay.Constant][226] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][227] // ty=Tensor[(192, 1, 1), float32])
- // %547: downsampling pool branch — 3x3 max pool with strides [2, 2] and no padding,
- // taking the 768-channel map from 17x17 down to 8x8 alongside the strided-conv branches.
- %547 = fn (%p0103: Tensor[(1, 768, 17, 17), float32], __dict__=meta[StrMap][103]) -> Tensor[(1, 768, 8, 8), float32] {
- %546 = nn.max_pool2d(%p0103, pool_size=[3, 3], strides=[2, 2])
- %546
- }
- %548 = %547(%519)
- %549 = %27(%521, %545, %548)
- %550 = %24(%549, meta[relay.Constant][228] // ty=Tensor[(320, 1280, 1, 1), float32], meta[relay.Constant][229] // ty=Tensor[(320, 1, 1), float32], meta[relay.Constant][230] // ty=Tensor[(320, 1, 1), float32])
- %555 = fn (%p0104: Tensor[(1, 384, 8, 8), float32], %p190: Tensor[(384, 384, 1, 3), float32], %p290: Tensor[(384, 1, 1), float32], %p387: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][104]) -> Tensor[(1, 384, 8, 8), float32] {
- %551 = nn.conv2d(%p0104, %p190, padding=[0, 1], channels=384, kernel_size=[1, 3])
- %552 = multiply(%551, %p290)
- %553 = add(%552, %p387)
- %554 = nn.relu(%553)
- %554
- }
- %560 = fn (%p0105: Tensor[(1, 1280, 8, 8), float32], %p191: Tensor[(384, 1280, 1, 1), float32], %p291: Tensor[(384, 1, 1), float32], %p388: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][105]) -> Tensor[(1, 384, 8, 8), float32] {
- %556 = nn.conv2d(%p0105, %p191, channels=384, kernel_size=[1, 1])
- %557 = multiply(%556, %p291)
- %558 = add(%557, %p388)
- %559 = nn.relu(%558)
- %559
- }
- %561 = %560(%549, meta[relay.Constant][231] // ty=Tensor[(384, 1280, 1, 1), float32], meta[relay.Constant][232] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][233] // ty=Tensor[(384, 1, 1), float32])
- %562 = %555(%561, meta[relay.Constant][234] // ty=Tensor[(384, 384, 1, 3), float32], meta[relay.Constant][235] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][236] // ty=Tensor[(384, 1, 1), float32])
- %567 = fn (%p0106: Tensor[(1, 384, 8, 8), float32], %p192: Tensor[(384, 384, 3, 1), float32], %p292: Tensor[(384, 1, 1), float32], %p389: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][106]) -> Tensor[(1, 384, 8, 8), float32] {
- %563 = nn.conv2d(%p0106, %p192, padding=[1, 0], channels=384, kernel_size=[3, 1])
- %564 = multiply(%563, %p292)
- %565 = add(%564, %p389)
- %566 = nn.relu(%565)
- %566
- }
- %568 = %567(%561, meta[relay.Constant][237] // ty=Tensor[(384, 384, 3, 1), float32], meta[relay.Constant][238] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][239] // ty=Tensor[(384, 1, 1), float32])
- %573 = fn (%p0107: Tensor[(1, 384, 8, 8), float32], %p193: Tensor[(384, 384, 1, 3), float32], %p293: Tensor[(384, 1, 1), float32], %p390: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][107]) -> Tensor[(1, 384, 8, 8), float32] {
- %569 = nn.conv2d(%p0107, %p193, padding=[0, 1], channels=384, kernel_size=[1, 3])
- %570 = multiply(%569, %p293)
- %571 = add(%570, %p390)
- %572 = nn.relu(%571)
- %572
- }
- %578 = fn (%p0108: Tensor[(1, 448, 8, 8), float32], %p194: Tensor[(384, 448, 3, 3), float32], %p294: Tensor[(384, 1, 1), float32], %p391: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][108]) -> Tensor[(1, 384, 8, 8), float32] {
- %574 = nn.conv2d(%p0108, %p194, padding=[1, 1], channels=384, kernel_size=[3, 3])
- %575 = multiply(%574, %p294)
- %576 = add(%575, %p391)
- %577 = nn.relu(%576)
- %577
- }
- %583 = fn (%p0109: Tensor[(1, 1280, 8, 8), float32], %p195: Tensor[(448, 1280, 1, 1), float32], %p295: Tensor[(448, 1, 1), float32], %p392: Tensor[(448, 1, 1), float32], __dict__=meta[StrMap][109]) -> Tensor[(1, 448, 8, 8), float32] {
- %579 = nn.conv2d(%p0109, %p195, channels=448, kernel_size=[1, 1])
- %580 = multiply(%579, %p295)
- %581 = add(%580, %p392)
- %582 = nn.relu(%581)
- %582
- }
- %584 = %583(%549, meta[relay.Constant][240] // ty=Tensor[(448, 1280, 1, 1), float32], meta[relay.Constant][241] // ty=Tensor[(448, 1, 1), float32], meta[relay.Constant][242] // ty=Tensor[(448, 1, 1), float32])
- %585 = %578(%584, meta[relay.Constant][243] // ty=Tensor[(384, 448, 3, 3), float32], meta[relay.Constant][244] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][245] // ty=Tensor[(384, 1, 1), float32])
- %586 = %573(%585, meta[relay.Constant][246] // ty=Tensor[(384, 384, 1, 3), float32], meta[relay.Constant][247] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][248] // ty=Tensor[(384, 1, 1), float32])
- %591 = fn (%p0110: Tensor[(1, 384, 8, 8), float32], %p196: Tensor[(384, 384, 3, 1), float32], %p296: Tensor[(384, 1, 1), float32], %p393: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][110]) -> Tensor[(1, 384, 8, 8), float32] {
- %587 = nn.conv2d(%p0110, %p196, padding=[1, 0], channels=384, kernel_size=[3, 1])
- %588 = multiply(%587, %p296)
- %589 = add(%588, %p393)
- %590 = nn.relu(%589)
- %590
- }
- %592 = %591(%585, meta[relay.Constant][249] // ty=Tensor[(384, 384, 3, 1), float32], meta[relay.Constant][250] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][251] // ty=Tensor[(384, 1, 1), float32])
- %597 = fn (%p0111: Tensor[(1, 1280, 8, 8), float32], %p197: Tensor[(192, 1280, 1, 1), float32], %p297: Tensor[(192, 1, 1), float32], %p394: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][111]) -> Tensor[(1, 192, 8, 8), float32] {
- %593 = nn.conv2d(%p0111, %p197, channels=192, kernel_size=[1, 1])
- %594 = multiply(%593, %p297)
- %595 = add(%594, %p394)
- %596 = nn.relu(%595)
- %596
- }
- %599 = fn (%p0112: Tensor[(1, 1280, 8, 8), float32], __dict__=meta[StrMap][112]) -> Tensor[(1, 1280, 8, 8), float32] {
- %598 = nn.avg_pool2d(%p0112, pool_size=[3, 3], padding=[1, 1], count_include_pad=True)
- %598
- }
- %600 = %599(%549)
- %601 = %597(%600, meta[relay.Constant][252] // ty=Tensor[(192, 1280, 1, 1), float32], meta[relay.Constant][253] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][254] // ty=Tensor[(192, 1, 1), float32])
- %602 = %19(%550, %562, %568, %586, %592, %601)
- %603 = %16(%602, meta[relay.Constant][255] // ty=Tensor[(320, 2048, 1, 1), float32], meta[relay.Constant][256] // ty=Tensor[(320, 1, 1), float32], meta[relay.Constant][257] // ty=Tensor[(320, 1, 1), float32])
- %608 = fn (%p0113: Tensor[(1, 384, 8, 8), float32], %p198: Tensor[(384, 384, 1, 3), float32], %p298: Tensor[(384, 1, 1), float32], %p395: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][113]) -> Tensor[(1, 384, 8, 8), float32] {
- %604 = nn.conv2d(%p0113, %p198, padding=[0, 1], channels=384, kernel_size=[1, 3])
- %605 = multiply(%604, %p298)
- %606 = add(%605, %p395)
- %607 = nn.relu(%606)
- %607
- }
- %613 = fn (%p0114: Tensor[(1, 2048, 8, 8), float32], %p199: Tensor[(384, 2048, 1, 1), float32], %p299: Tensor[(384, 1, 1), float32], %p396: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][114]) -> Tensor[(1, 384, 8, 8), float32] {
- %609 = nn.conv2d(%p0114, %p199, channels=384, kernel_size=[1, 1])
- %610 = multiply(%609, %p299)
- %611 = add(%610, %p396)
- %612 = nn.relu(%611)
- %612
- }
- %614 = %613(%602, meta[relay.Constant][258] // ty=Tensor[(384, 2048, 1, 1), float32], meta[relay.Constant][259] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][260] // ty=Tensor[(384, 1, 1), float32])
- %615 = %608(%614, meta[relay.Constant][261] // ty=Tensor[(384, 384, 1, 3), float32], meta[relay.Constant][262] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][263] // ty=Tensor[(384, 1, 1), float32])
- %620 = fn (%p0115: Tensor[(1, 384, 8, 8), float32], %p1100: Tensor[(384, 384, 3, 1), float32], %p2100: Tensor[(384, 1, 1), float32], %p397: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][115]) -> Tensor[(1, 384, 8, 8), float32] {
- %616 = nn.conv2d(%p0115, %p1100, padding=[1, 0], channels=384, kernel_size=[3, 1])
- %617 = multiply(%616, %p2100)
- %618 = add(%617, %p397)
- %619 = nn.relu(%618)
- %619
- }
- %621 = %620(%614, meta[relay.Constant][264] // ty=Tensor[(384, 384, 3, 1), float32], meta[relay.Constant][265] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][266] // ty=Tensor[(384, 1, 1), float32])
- %626 = fn (%p0116: Tensor[(1, 384, 8, 8), float32], %p1101: Tensor[(384, 384, 1, 3), float32], %p2101: Tensor[(384, 1, 1), float32], %p398: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][116]) -> Tensor[(1, 384, 8, 8), float32] {
- %622 = nn.conv2d(%p0116, %p1101, padding=[0, 1], channels=384, kernel_size=[1, 3])
- %623 = multiply(%622, %p2101)
- %624 = add(%623, %p398)
- %625 = nn.relu(%624)
- %625
- }
- %631 = fn (%p0117: Tensor[(1, 448, 8, 8), float32], %p1102: Tensor[(384, 448, 3, 3), float32], %p2102: Tensor[(384, 1, 1), float32], %p399: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][117]) -> Tensor[(1, 384, 8, 8), float32] {
- %627 = nn.conv2d(%p0117, %p1102, padding=[1, 1], channels=384, kernel_size=[3, 3])
- %628 = multiply(%627, %p2102)
- %629 = add(%628, %p399)
- %630 = nn.relu(%629)
- %630
- }
- %636 = fn (%p0118: Tensor[(1, 2048, 8, 8), float32], %p1103: Tensor[(448, 2048, 1, 1), float32], %p2103: Tensor[(448, 1, 1), float32], %p3100: Tensor[(448, 1, 1), float32], __dict__=meta[StrMap][118]) -> Tensor[(1, 448, 8, 8), float32] {
- %632 = nn.conv2d(%p0118, %p1103, channels=448, kernel_size=[1, 1])
- %633 = multiply(%632, %p2103)
- %634 = add(%633, %p3100)
- %635 = nn.relu(%634)
- %635
- }
- %637 = %636(%602, meta[relay.Constant][267] // ty=Tensor[(448, 2048, 1, 1), float32], meta[relay.Constant][268] // ty=Tensor[(448, 1, 1), float32], meta[relay.Constant][269] // ty=Tensor[(448, 1, 1), float32])
- %638 = %631(%637, meta[relay.Constant][270] // ty=Tensor[(384, 448, 3, 3), float32], meta[relay.Constant][271] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][272] // ty=Tensor[(384, 1, 1), float32])
- %639 = %626(%638, meta[relay.Constant][273] // ty=Tensor[(384, 384, 1, 3), float32], meta[relay.Constant][274] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][275] // ty=Tensor[(384, 1, 1), float32])
- %644 = fn (%p0119: Tensor[(1, 384, 8, 8), float32], %p1104: Tensor[(384, 384, 3, 1), float32], %p2104: Tensor[(384, 1, 1), float32], %p3101: Tensor[(384, 1, 1), float32], __dict__=meta[StrMap][119]) -> Tensor[(1, 384, 8, 8), float32] {
- %640 = nn.conv2d(%p0119, %p1104, padding=[1, 0], channels=384, kernel_size=[3, 1])
- %641 = multiply(%640, %p2104)
- %642 = add(%641, %p3101)
- %643 = nn.relu(%642)
- %643
- }
- %645 = %644(%638, meta[relay.Constant][276] // ty=Tensor[(384, 384, 3, 1), float32], meta[relay.Constant][277] // ty=Tensor[(384, 1, 1), float32], meta[relay.Constant][278] // ty=Tensor[(384, 1, 1), float32])
- %650 = fn (%p0120: Tensor[(1, 2048, 8, 8), float32], %p1105: Tensor[(192, 2048, 1, 1), float32], %p2105: Tensor[(192, 1, 1), float32], %p3102: Tensor[(192, 1, 1), float32], __dict__=meta[StrMap][120]) -> Tensor[(1, 192, 8, 8), float32] {
- %646 = nn.conv2d(%p0120, %p1105, channels=192, kernel_size=[1, 1])
- %647 = multiply(%646, %p2105)
- %648 = add(%647, %p3102)
- %649 = nn.relu(%648)
- %649
- }
- // %652: pooling branch — 3x3 max pool with padding [1, 1] (size-preserving, 8x8 in
- // and out, 2048 channels). NOTE(review): the analogous pooling branches elsewhere in
- // this dump (%339, %398, %457, %516, %599) use nn.avg_pool2d; this one is max_pool2d —
- // presumably faithful to the source model, but worth confirming it is not a
- // frontend-conversion artifact.
- %652 = fn (%p0121: Tensor[(1, 2048, 8, 8), float32], __dict__=meta[StrMap][121]) -> Tensor[(1, 2048, 8, 8), float32] {
- %651 = nn.max_pool2d(%p0121, pool_size=[3, 3], padding=[1, 1])
- %651
- }
- %653 = %652(%602)
- %654 = %650(%653, meta[relay.Constant][279] // ty=Tensor[(192, 2048, 1, 1), float32], meta[relay.Constant][280] // ty=Tensor[(192, 1, 1), float32], meta[relay.Constant][281] // ty=Tensor[(192, 1, 1), float32])
- %655 = %11(%603, %615, %621, %639, %645, %654)
- %656 = %8(%655)
- %657 = %6(%656)
- %658 = %4(%657, meta[relay.Constant][282] // ty=Tensor[(1000, 2048), float32], meta[relay.Constant][283] // ty=Tensor[(1000,), float32])
- %659 = %1(%658)
- %659
- }
- %660
- // meta data omitted. you can use show_meta_data=True to include meta data
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement