
Commit 01fa24e

adjust tensor creation (#127)
1 parent 516d988 commit 01fa24e

5 files changed: +7 -7 lines changed

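All five changes follow the same pattern: the size argument of a tensor factory call (torch::empty, torch::zeros, torch::ones) is wrapped in braces, so the one-dimensional shape is passed as a braced initializer list (which converts to at::IntArrayRef) rather than through the implicit single-element int64_t conversion. A minimal standalone sketch of the before/after (not part of the repository; n and options are placeholders):

#include <torch/torch.h>
#include <iostream>

int main() {
  int64_t n = 8;
  auto options = torch::TensorOptions().dtype(torch::kLong);

  // Before: a bare int64_t reaches the factory through ArrayRef's
  // implicit one-element constructor.
  auto a = torch::empty(n, options);

  // After (the form used in this commit): the 1-D shape is spelled
  // out as an initializer list.
  auto b = torch::empty({n}, options);

  // Both calls produce a tensor of shape [8].
  std::cout << a.sizes() << " " << b.sizes() << std::endl;
  return 0;
}

Both forms create the same one-dimensional tensor; the braced form simply makes the intended shape explicit at the call site.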

csrc/cpu/fps_cpu.cpp

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ torch::Tensor fps_cpu(torch::Tensor src, torch::Tensor ptr, torch::Tensor ratio,
   auto out_ptr = deg.toType(torch::kFloat) * ratio;
   out_ptr = out_ptr.ceil().toType(torch::kLong).cumsum(0);

-  auto out = torch::empty(out_ptr[-1].data_ptr<int64_t>()[0], ptr.options());
+  auto out = torch::empty({out_ptr[-1].data_ptr<int64_t>()[0]}, ptr.options());

   auto ptr_data = ptr.data_ptr<int64_t>();
   auto out_ptr_data = out_ptr.data_ptr<int64_t>();

csrc/cpu/grid_cpu.cpp

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ torch::Tensor grid_cpu(torch::Tensor pos, torch::Tensor size,
   auto num_voxels = (end - start).true_divide(size).toType(torch::kLong) + 1;
   num_voxels = num_voxels.cumprod(0);
   num_voxels =
-      torch::cat({torch::ones(1, num_voxels.options()), num_voxels}, 0);
+      torch::cat({torch::ones({1}, num_voxels.options()), num_voxels}, 0);
   num_voxels = num_voxels.narrow(0, 0, size.size(0));

   auto out = pos.true_divide(size.view({1, -1})).toType(torch::kLong);

csrc/cuda/fps_cuda.cu

Lines changed: 3 additions & 3 deletions
@@ -80,22 +80,22 @@ torch::Tensor fps_cuda(torch::Tensor src, torch::Tensor ptr,
   auto deg = ptr.narrow(0, 1, batch_size) - ptr.narrow(0, 0, batch_size);
   auto out_ptr = deg.toType(ratio.scalar_type()) * ratio;
   out_ptr = out_ptr.ceil().toType(torch::kLong).cumsum(0);
-  out_ptr = torch::cat({torch::zeros(1, ptr.options()), out_ptr}, 0);
+  out_ptr = torch::cat({torch::zeros({1}, ptr.options()), out_ptr}, 0);

   torch::Tensor start;
   if (random_start) {
     start = torch::rand(batch_size, src.options());
     start = (start * deg.toType(ratio.scalar_type())).toType(torch::kLong);
   } else {
-    start = torch::zeros(batch_size, ptr.options());
+    start = torch::zeros({batch_size}, ptr.options());
   }

   auto dist = torch::full(src.size(0), 5e4, src.options());

   auto out_size = (int64_t *)malloc(sizeof(int64_t));
   cudaMemcpy(out_size, out_ptr[-1].data_ptr<int64_t>(), sizeof(int64_t),
              cudaMemcpyDeviceToHost);
-  auto out = torch::empty(out_size[0], out_ptr.options());
+  auto out = torch::empty({out_size[0]}, out_ptr.options());

   auto stream = at::cuda::getCurrentCUDAStream();
   auto scalar_type = src.scalar_type();

csrc/cuda/grid_cuda.cu

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ torch::Tensor grid_cuda(torch::Tensor pos, torch::Tensor size,
   auto start = optional_start.value();
   auto end = optional_end.value();

-  auto out = torch::empty(pos.size(0), pos.options().dtype(torch::kLong));
+  auto out = torch::empty({pos.size(0)}, pos.options().dtype(torch::kLong));

   auto stream = at::cuda::getCurrentCUDAStream();
   AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, pos.scalar_type(), "_", [&] {

csrc/cuda/knn_cuda.cu

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ torch::Tensor knn_cuda(const torch::Tensor x, const torch::Tensor y,

   cudaSetDevice(x.get_device());

-  auto row = torch::empty(y.size(0) * k, ptr_y.value().options());
+  auto row = torch::empty({y.size(0) * k}, ptr_y.value().options());
   auto col = torch::full(y.size(0) * k, -1, ptr_y.value().options());

   dim3 BLOCKS((y.size(0) + THREADS - 1) / THREADS);
