@@ -1102,16 +1102,18 @@ static void _ccv_cnnp_convolution_build(ccv_cnnp_model_t* const super, ccv_nnc_s
 	assert(output_size == 1);
 	const ccv_nnc_tensor_param_t params = ccv_nnc_tensor_symbol_params(graph, inputs[0]);
 	int i;
-	const int nd = CCV_NNC_MAX_DIM + 2;
+	const int k_nd = ccv_nnc_tensor_nd(self->kdim);
+	const int nd = k_nd + 2;
 	ccv_nnc_tensor_param_t weights_params = params;
 	if (self->format)
 		weights_params.format = self->format;
 	ccv_nnc_tensor_set_n(&weights_params, self->filters);
-	assert(ccv_nnc_tensor_get_c(params) % self->groups == 0);
-	ccv_nnc_tensor_set_c(&weights_params, nd, ccv_nnc_tensor_get_c(params) / self->groups);
+	const int c = ccv_nnc_tensor_get_c(params);
+	assert(c % self->groups == 0);
+	ccv_nnc_tensor_set_c(&weights_params, nd, c / self->groups);
 	const int hw = ccv_nnc_tensor_hw(weights_params, nd);
 	assert(hw >= 0);
-	for (i = 0; i < CCV_NNC_MAX_DIM; i++)
+	for (i = 0; i < k_nd; i++)
 		weights_params.dim[i + hw] = self->kdim[i];
 	if (!self->weights.graph)
 		self->weights = ccv_nnc_tensor_symbol_new(graph, weights_params, "weights");
@@ -1122,12 +1124,13 @@ static void _ccv_cnnp_convolution_build(ccv_cnnp_model_t* const super, ccv_nnc_s
 	memset(bias_params.dim, 0, sizeof(bias_params.dim));
 	bias_params.dim[0] = self->filters;
 	ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_FORWARD(self->groups, self->filters);
-	for (i = 0; i < CCV_NNC_MAX_DIM; i++)
+	for (i = 0; i < k_nd; i++)
 		cmd.info.size.dim[i] = self->kdim[i];
+	cmd.info.size.dim[k_nd] = c;
 	memcpy(cmd.info.convolution.dilation, self->dilation, sizeof(self->dilation));
 	ccv_nnc_tensor_param_t output_params;
 	// Dilate weight size based on the dilation factor.
-	for (i = 0; i < CCV_NNC_MAX_DIM; i++)
+	for (i = 0; i < k_nd; i++)
 		weights_params.dim[i + hw] = (self->kdim[i] - 1) * ccv_max(self->dilation[i], 1) + 1;
 	ccv_nnc_hint_tensor_auto(cmd, (ccv_nnc_tensor_param_t []){
 		params,
@@ -1235,8 +1238,9 @@ static void _ccv_cnnp_convolution_transpose_build(ccv_cnnp_model_t* const super,
 	ccv_nnc_tensor_param_t weights_params = params;
 	if (self->format)
 		weights_params.format = self->format;
-	ccv_nnc_tensor_set_n(&weights_params, ccv_nnc_tensor_get_c(params));
-	assert(ccv_nnc_tensor_get_c(params) % self->groups == 0);
+	const int c = ccv_nnc_tensor_get_c(params);
+	ccv_nnc_tensor_set_n(&weights_params, c);
+	assert(c % self->groups == 0);
 	ccv_nnc_tensor_set_c(&weights_params, nd, self->filters / self->groups);
 	const int hw = ccv_nnc_tensor_hw(weights_params, nd);
 	assert(hw >= 0);
@@ -1253,6 +1257,7 @@ static void _ccv_cnnp_convolution_transpose_build(ccv_cnnp_model_t* const super,
 	ccv_nnc_cmd_t cmd = CMD_CONVOLUTION_TRANSPOSE_FORWARD(self->groups, self->filters, self->output_padding);
 	for (i = 0; i < CCV_NNC_MAX_DIM; i++)
 		cmd.info.size.dim[i] = self->kdim[i];
+	cmd.info.size.dim[CCV_NNC_MAX_DIM] = c;
 	memcpy(cmd.info.convolution_transpose.dilation, self->dilation, sizeof(self->dilation));
 	ccv_nnc_tensor_param_t output_params;
 	// Dilate weight size based on the dilation factor.
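
Note: the convolution build path now derives the kernel dimensionality from self->kdim via ccv_nnc_tensor_nd instead of assuming CCV_NNC_MAX_DIM. A minimal standalone sketch of that idea follows, assuming the dimension count is simply the number of leading non-zero entries in the kdim array; kernel_nd and KDIM_MAX below are hypothetical stand-ins for illustration only, not the library's actual helper or CCV_NNC_MAX_DIM_ALLOC.

#include <assert.h>

#define KDIM_MAX 8 /* hypothetical bound, stands in for the library's dim-array capacity */

/* Hypothetical helper: count leading non-zero entries of a kernel-size array
 * (a zero entry marks the end of the specified dimensions). */
static int kernel_nd(const int dim[KDIM_MAX])
{
	int i;
	for (i = 0; i < KDIM_MAX; i++)
		if (dim[i] == 0)
			return i;
	return KDIM_MAX;
}

int main(void)
{
	const int kdim_2d[KDIM_MAX] = { 3, 3 };    /* a 3x3 kernel */
	const int kdim_3d[KDIM_MAX] = { 3, 3, 3 }; /* a 3x3x3 kernel */
	assert(kernel_nd(kdim_2d) == 2); /* k_nd = 2, so nd = k_nd + 2 = 4 (batch + spatial + channel) */
	assert(kernel_nd(kdim_3d) == 3); /* k_nd = 3, so nd = k_nd + 2 = 5 */
	return 0;
}

With this, a 1D or 3D convolution layer sizes its weight tensor from however many kernel dimensions were actually given, rather than always using the fixed 2D maximum.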