@@ -200,13 +200,16 @@ nvinfer1::ITensor* tensor_to_const(ConversionCtx* ctx, at::Tensor t, const std::
 }
 
 // clamp x to [lower_bound, upper_bound]
-nvinfer1::ITensor* clamp(ConversionCtx* ctx, nvinfer1::ITensor* x,
-                         nvinfer1::ITensor* lower_bound, nvinfer1::ITensor* upper_bound) {
+nvinfer1::ITensor* clamp(
+    ConversionCtx* ctx,
+    nvinfer1::ITensor* x,
+    nvinfer1::ITensor* lower_bound,
+    nvinfer1::ITensor* upper_bound) {
   auto max_layer = ctx->net->addElementWise(*x, *lower_bound, nvinfer1::ElementWiseOperation::kMAX);
   TORCHTRT_CHECK(max_layer, "Unable to create max layer for clamp");
   LOG_DEBUG(ctx->logger, "Create " << max_layer->getName() << " for clamp");
   auto max_itensor = max_layer->getOutput(0);
-
+
   auto min_layer = ctx->net->addElementWise(*max_itensor, *upper_bound, nvinfer1::ElementWiseOperation::kMIN);
   TORCHTRT_CHECK(min_layer, "Unable to create min layer for clamp");
   LOG_DEBUG(ctx->logger, "Create " << min_layer->getName() << " for clamp");
@@ -215,8 +218,7 @@ nvinfer1::ITensor* clamp(ConversionCtx* ctx, nvinfer1::ITensor* x,
 }
 
 // clamp x to [0, input_dim]
-nvinfer1::ITensor* clamp_to_input_dim(ConversionCtx* ctx, nvinfer1::ITensor* x,
-                                      nvinfer1::ITensor* input_dim) {
+nvinfer1::ITensor* clamp_to_input_dim(ConversionCtx* ctx, nvinfer1::ITensor* x, nvinfer1::ITensor* input_dim) {
   auto nbdims = input_dim->getDimensions().d[0];
   auto zero = torch::zeros({nbdims}).to(torch::kI32);
   auto zero_itensor = tensor_to_const(ctx, zero);
@@ -227,91 +229,95 @@ nvinfer1::ITensor* clamp_to_input_dim(ConversionCtx* ctx, nvinfer1::ITensor* x,
   TORCHTRT_CHECK(upper_bound_layer, "Unable to create sub layer for clamp to inputDim");
   LOG_DEBUG(ctx->logger, "Create " << upper_bound_layer->getName() << " for clamp to inputDim");
   auto upper_bound = upper_bound_layer->getOutput(0);
-
+
   auto max_layer = ctx->net->addElementWise(*x, *zero_itensor, nvinfer1::ElementWiseOperation::kMAX);
   TORCHTRT_CHECK(max_layer, "Unable to create max_layer for clamp to inputDim");
   LOG_DEBUG(ctx->logger, "Create " << max_layer->getName() << " for clamp to inputDim");
   auto max_itensor = max_layer->getOutput(0);
-
+
   auto min_layer = ctx->net->addElementWise(*max_itensor, *upper_bound, nvinfer1::ElementWiseOperation::kMIN);
   TORCHTRT_CHECK(min_layer, "Unable to create min_layer for clamp to inputDim");
   LOG_DEBUG(ctx->logger, "Create " << min_layer->getName() << " for clamp to inputDim");
   auto min_itensor = min_layer->getOutput(0);
   return min_itensor;
 }
 
-
 // return indices < 0 ? inputDims + indices : indices
-nvinfer1::ITensor* bump_if_negtive(ConversionCtx* ctx, nvinfer1::ITensor* input_dim,
-                                   nvinfer1::ITensor* indices) {
-  auto nbdims = input_dim->getDimensions().d[0];
-  auto zero = torch::zeros({nbdims}).to(torch::kI32);
-  auto neg = - torch::ones({nbdims}).to(torch::kI32);
-  auto zero_itensor = tensor_to_const(ctx, zero);
-  auto neg_itensor = tensor_to_const(ctx, neg);
-  // find the indices that = -1
-  auto signs = clamp(ctx, indices, neg_itensor, zero_itensor);
-
-  // get the inputDim value where indices == -1, else 0
-  auto mul = ctx->net->addElementWise(*signs, *input_dim, nvinfer1::ElementWiseOperation::kPROD);
-  TORCHTRT_CHECK(mul, "Unable to create mul layer in bump_if_negtive");
-  LOG_DEBUG(ctx->logger, "Create " << mul->getName() << " for bump_if_negtive");
-  auto mul_itensor = mul->getOutput(0);
-
-  // add the inputDim value to indices where indices == -1
-  auto sub = ctx->net->addElementWise(*indices, *mul_itensor, nvinfer1::ElementWiseOperation::kSUB);
-  TORCHTRT_CHECK(sub, "Unable to create sub layer in bump_if_negtive");
-  LOG_DEBUG(ctx->logger, "Create " << sub->getName() << " for bump_if_negtive");
-  auto sub_itensor = sub->getOutput(0);
-  return sub_itensor;
+nvinfer1::ITensor* bump_if_negtive(ConversionCtx* ctx, nvinfer1::ITensor* input_dim, nvinfer1::ITensor* indices) {
+  auto nbdims = input_dim->getDimensions().d[0];
+  auto zero = torch::zeros({nbdims}).to(torch::kI32);
+  auto neg = -torch::ones({nbdims}).to(torch::kI32);
+  auto zero_itensor = tensor_to_const(ctx, zero);
+  auto neg_itensor = tensor_to_const(ctx, neg);
+  // find the indices that = -1
+  auto signs = clamp(ctx, indices, neg_itensor, zero_itensor);
+
+  // get the inputDim value where indices == -1, else 0
+  auto mul = ctx->net->addElementWise(*signs, *input_dim, nvinfer1::ElementWiseOperation::kPROD);
+  TORCHTRT_CHECK(mul, "Unable to create mul layer in bump_if_negtive");
+  LOG_DEBUG(ctx->logger, "Create " << mul->getName() << " for bump_if_negtive");
+  auto mul_itensor = mul->getOutput(0);
+
+  // add the inputDim value to indices where indices == -1
+  auto sub = ctx->net->addElementWise(*indices, *mul_itensor, nvinfer1::ElementWiseOperation::kSUB);
+  TORCHTRT_CHECK(sub, "Unable to create sub layer in bump_if_negtive");
+  LOG_DEBUG(ctx->logger, "Create " << sub->getName() << " for bump_if_negtive");
+  auto sub_itensor = sub->getOutput(0);
+  return sub_itensor;
 }
 
-std::vector<nvinfer1::ITensor*> update_start_and_end(ConversionCtx* ctx, nvinfer1::ITensor* in_shape,
-                                                      nvinfer1::ITensor* in_start, nvinfer1::ITensor* in_end) {
-  auto start = bump_if_negtive(ctx, in_shape, in_start);
-  auto out_start = clamp_to_input_dim(ctx, start, in_shape);
-  auto end = bump_if_negtive(ctx, in_shape, in_end);
-  auto out_end = clamp_to_input_dim(ctx, end, in_shape);
-  std::vector<nvinfer1::ITensor*> outputs;
-  outputs.push_back(out_start);
-  outputs.push_back(out_end);
-  return outputs;
+std::vector<nvinfer1::ITensor*> update_start_and_end(
+    ConversionCtx* ctx,
+    nvinfer1::ITensor* in_shape,
+    nvinfer1::ITensor* in_start,
+    nvinfer1::ITensor* in_end) {
+  auto start = bump_if_negtive(ctx, in_shape, in_start);
+  auto out_start = clamp_to_input_dim(ctx, start, in_shape);
+  auto end = bump_if_negtive(ctx, in_shape, in_end);
+  auto out_end = clamp_to_input_dim(ctx, end, in_shape);
+  std::vector<nvinfer1::ITensor*> outputs;
+  outputs.push_back(out_start);
+  outputs.push_back(out_end);
+  return outputs;
 }
 
 // size = (end - start) / stride + 1, where range is [start, end], end is included
-nvinfer1::ITensor* calculate_output_size(ConversionCtx* ctx, nvinfer1::ITensor* start, nvinfer1::ITensor* end,
-                                          nvinfer1::ITensor* stride, int nbdims) {
-
-  at::Tensor one_tensor = torch::ones({nbdims}).to(torch::kI32);
-  auto one_itensor = tensor_to_const(ctx, one_tensor);
-
-  auto sub_layer = ctx->net->addElementWise(*end, *start, nvinfer1::ElementWiseOperation::kSUB);
-  TORCHTRT_CHECK(sub_layer, "Unable to create sub layer in calculate_output_size");
-  LOG_DEBUG(ctx->logger, "Create " << sub_layer->getName() << " for calculate_output_size");
-  auto sub_itensor = sub_layer->getOutput(0);
-
-  auto div_layer = ctx->net->addElementWise(*sub_itensor, *stride, nvinfer1::ElementWiseOperation::kDIV);
-  TORCHTRT_CHECK(div_layer, "Unable to create div layer in calculate_output_size");
-  LOG_DEBUG(ctx->logger, "Create " << div_layer->getName() << " for calculate_output_size");
-  auto div_itensor = div_layer->getOutput(0);
-
-  auto add_layer = ctx->net->addElementWise(*div_itensor, *one_itensor, nvinfer1::ElementWiseOperation::kSUM);
-  TORCHTRT_CHECK(add_layer, "Unable to create add layer in calculate_output_size");
-  LOG_DEBUG(ctx->logger, "Create " << add_layer->getName() << " for calculate_output_size");
-  auto size_itensor = add_layer->getOutput(0);
-
-  return size_itensor;
+nvinfer1::ITensor* calculate_output_size(
+    ConversionCtx* ctx,
+    nvinfer1::ITensor* start,
+    nvinfer1::ITensor* end,
+    nvinfer1::ITensor* stride,
+    int nbdims) {
+  at::Tensor one_tensor = torch::ones({nbdims}).to(torch::kI32);
+  auto one_itensor = tensor_to_const(ctx, one_tensor);
+
+  auto sub_layer = ctx->net->addElementWise(*end, *start, nvinfer1::ElementWiseOperation::kSUB);
+  TORCHTRT_CHECK(sub_layer, "Unable to create sub layer in calculate_output_size");
+  LOG_DEBUG(ctx->logger, "Create " << sub_layer->getName() << " for calculate_output_size");
+  auto sub_itensor = sub_layer->getOutput(0);
+
+  auto div_layer = ctx->net->addElementWise(*sub_itensor, *stride, nvinfer1::ElementWiseOperation::kDIV);
+  TORCHTRT_CHECK(div_layer, "Unable to create div layer in calculate_output_size");
+  LOG_DEBUG(ctx->logger, "Create " << div_layer->getName() << " for calculate_output_size");
+  auto div_itensor = div_layer->getOutput(0);
+
+  auto add_layer = ctx->net->addElementWise(*div_itensor, *one_itensor, nvinfer1::ElementWiseOperation::kSUM);
+  TORCHTRT_CHECK(add_layer, "Unable to create add layer in calculate_output_size");
+  LOG_DEBUG(ctx->logger, "Create " << add_layer->getName() << " for calculate_output_size");
+  auto size_itensor = add_layer->getOutput(0);
+
+  return size_itensor;
 }
 
 bool is_dynamic_shape(nvinfer1::ITensor* tensor) {
-  auto dim = tensor->getDimensions();
-  auto ndims = dim.nbDims;
-  for (int i = 0; i < ndims; i++) {
-    if (dim.d[i] == -1) {
-      return true;
-    }
-  }
-  return false;
+  auto dim = tensor->getDimensions();
+  auto ndims = dim.nbDims;
+  for (int i = 0; i < ndims; i++) {
+    if (dim.d[i] == -1) {
+      return true;
+    }
+  }
+  return false;
 }
 
 } // namespace converters
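
Note: the helpers touched by this diff are building blocks for slices whose bounds are only known at runtime. Negative starts/ends are bumped by the input dimension, clamped into [0, dim], and the per-dimension output extent follows size = (end - start) / stride + 1 with an inclusive end (for example, start=1, end=7, stride=2 gives (7 - 1) / 2 + 1 = 4 elements). The sketch below is illustrative only and not part of this commit: the wrapper name dynamic_slice_sketch and its arguments are hypothetical, the start/end/stride arguments are assumed to be 1-D Int32 ITensors of length nbdims, and TensorRT's ISliceLayer::setInput is used to feed the runtime start/size/stride.

// Hypothetical sketch, not from this commit: composing the helpers above into a
// data-dependent slice. Assumes ConversionCtx, TORCHTRT_CHECK and the helper
// functions from this file are in scope.
nvinfer1::ITensor* dynamic_slice_sketch(
    ConversionCtx* ctx,
    nvinfer1::ITensor* in,
    nvinfer1::ITensor* start_itensor,
    nvinfer1::ITensor* end_itensor,
    nvinfer1::ITensor* stride_itensor,
    int nbdims) {
  // Runtime shape of the input as a 1-D tensor of length nbdims.
  auto in_shape = ctx->net->addShape(*in)->getOutput(0);

  // Normalize negative indices against the input shape and clamp into range.
  auto bounds = update_start_and_end(ctx, in_shape, start_itensor, end_itensor);
  auto start = bounds[0];
  auto end = bounds[1];

  // Per-dimension extent, using the inclusive-end convention noted above.
  auto size = calculate_output_size(ctx, start, end, stride_itensor, nbdims);

  // Placeholder static dims; the actual start/size/stride are supplied as
  // tensor inputs, which TensorRT allows for dynamic slices (assumption: the
  // placeholder dims are ignored once the corresponding inputs are set).
  auto slice_layer = ctx->net->addSlice(*in, nvinfer1::Dims{}, nvinfer1::Dims{}, nvinfer1::Dims{});
  TORCHTRT_CHECK(slice_layer, "Unable to create slice layer in dynamic_slice_sketch");
  slice_layer->setInput(1, *start);
  slice_layer->setInput(2, *size);
  slice_layer->setInput(3, *stride_itensor);
  return slice_layer->getOutput(0);
}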