Commit 18d0141 ("updates")

1 parent e857921
4 files changed: 9 additions, 10 deletions

Ops/FWI/Src/Src_Rec.cu (2 additions, 2 deletions)

@@ -64,8 +64,8 @@ Src_Rec::Src_Rec(Parameter &para, string survey_fname, const float *stf, int gro
   float *d_win_start, *d_win_end;
   float *d_weights;
 
-  dim3 threads(TX,TY);
-  dim3 blocks((para.nz()+TX-1)/TX, (para.nx()+TY-1)/TY);
+  // dim3 threads(TX,TY);
+  // dim3 blocks((para.nz()+TX-1)/TX, (para.nx()+TY-1)/TY);
 
   if_res_ = para.if_res();
   if_win_ = para.if_win();

Ops/FWI/Src/Torch_Fwi.cpp (2 additions, 3 deletions)

@@ -60,7 +60,7 @@ std::vector<torch::Tensor> fwi_backward(const torch::Tensor &th_Lambda,
   auto th_grad_Lambda_sum = torch::zeros_like(th_Lambda);
   auto th_grad_Mu_sum = torch::zeros_like(th_Mu);
   auto th_grad_Den_sum = torch::zeros_like(th_Den);
-  auto th_misfit_sum = torch::zeros(1);
+  float misfit_sum = 0.0;
 
 #pragma omp parallel for num_threads(ngpu)
   for (int i = 0; i < ngpu; i++) {
@@ -86,14 +86,13 @@ std::vector<torch::Tensor> fwi_backward(const torch::Tensor &th_Lambda,
     // torch::Tensor th_misfit = torch::from_blob(&misfit, {1});
     vec_misfit.at(i) = misfit;
   }
-  float misfit_sum = 0.0;
   for (int i = 0; i < ngpu; i++) {
     th_grad_Lambda_sum += vec_grad_Lambda.at(i);
     th_grad_Mu_sum += vec_grad_Mu.at(i);
     th_grad_Den_sum += vec_grad_Den.at(i);
     misfit_sum += vec_misfit.at(i);
   }
-  return {torch::from_blob(&misfit_sum, {1}), th_grad_Lambda_sum, th_grad_Mu_sum,
+  return {torch::tensor({misfit_sum}), th_grad_Lambda_sum, th_grad_Mu_sum,
           th_grad_Den_sum, vec_grad_stf.at(0)};
 }
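Note on the return-value change: torch::from_blob wraps existing memory without copying, so the old code returned a tensor aliasing the stack local misfit_sum, which is destroyed when fwi_backward returns; the caller would then read dead stack memory. torch::tensor({misfit_sum}) instead copies the value into storage owned by the tensor. A minimal sketch of the difference (function names are illustrative, not from the repo):

    #include <torch/torch.h>

    torch::Tensor misfit_dangling() {
      float misfit_sum = 1.5f;
      // from_blob does NOT copy: the tensor aliases &misfit_sum,
      // whose storage dies when this function returns.
      return torch::from_blob(&misfit_sum, {1});
    }

    torch::Tensor misfit_owned() {
      float misfit_sum = 1.5f;
      // torch::tensor copies the value into tensor-owned memory.
      return torch::tensor({misfit_sum});
    }

With the plain float accumulator now declared before the parallel region, each OpenMP thread still writes only its own vec_misfit slot; the serial loop afterwards reduces into misfit_sum, and the single copy into a tensor happens at the return.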

Ops/FWI/Src/libCUFD.cu (2 additions, 2 deletions)

@@ -102,8 +102,8 @@ extern "C" void cufd(float *misfit, float *grad_Lambda, float *grad_Mu,
 
   dim3 threads(TX, TY);
   dim3 blocks((nz + TX - 1) / TX, (nx + TY - 1) / TY);
-  dim3 threads2(TX + 4, TY + 4);
-  dim3 blocks2((nz + TX + 3) / (TX + 4), (nx + TY + 3) / (TY + 4));
+  // dim3 threads2(TX + 4, TY + 4);
+  // dim3 blocks2((nz + TX + 3) / (TX + 4), (nx + TY + 3) / (TY + 4));
 
   float *d_vz, *d_vx, *d_szz, *d_sxx, *d_sxz, *d_vz_adj, *d_vx_adj, *d_szz_adj,
       *d_sxx_adj, *d_sxz_adj;
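For reference, the launch configuration that stays active sizes the grid by ceiling division, so blocks of TX x TY threads tile the nz x nx domain even when the dimensions are not multiples of the tile. A standalone sketch of the arithmetic (the TX, TY, nz, nx values are assumed for illustration, not taken from the commit):

    #include <cstdio>

    // Assumed tile sizes for illustration; the real ones live in the headers.
    constexpr int TX = 32, TY = 8;

    // Smallest g such that g * tile >= n, i.e. ceil(n / tile) in integers.
    inline int ceil_div(int n, int tile) { return (n + tile - 1) / tile; }

    int main() {
      int nz = 100, nx = 300;  // example domain, not from the commit
      printf("grid = %d x %d blocks\n", ceil_div(nz, TX), ceil_div(nx, TY));
      // 100 rows / 32 -> 4 blocks (covers 128); 300 cols / 8 -> 38 (covers 304)
      return 0;
    }

dim3 blocks((nz + TX - 1) / TX, ...) above is exactly this ceil_div; the commented-out blocks2 applied the same formula to a halo-padded tile of (TX + 4) x (TY + 4).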

src/main.py (3 additions, 3 deletions)

@@ -84,9 +84,9 @@
 cp_true_pad = np.ascontiguousarray(np.reshape(cp_true_pad, (nz_pad, -1), order='F'))
 cs_true_pad = np.zeros((nz_pad, nx_pad))
 print(f'cp_true_pad shape = {cp_true_pad.shape}')
-plt.imshow(cp_true_pad, cmap='RdBu_r')
-plt.colorbar()
-plt.show()
+# plt.imshow(cp_true_pad, cmap='RdBu_r')
+# plt.colorbar()
+# plt.show()
 den_true_pad = 2500.0 * np.ones((nz_pad, nx_pad))
 th_cp_pad = torch.tensor(cp_true_pad, dtype=torch.float32, requires_grad=False)
 th_cs_pad = torch.tensor(cs_true_pad, dtype=torch.float32, requires_grad=False)
