
Commit a98258b

[CodeStyle][Typos][S-[43-49]] Fix typo (storage, sotring...) (PaddlePaddle#71001)
1 parent: d0ed2fa

File tree

8 files changed (+12, -18 lines)


_typos.toml

Lines changed: 0 additions & 6 deletions
@@ -55,10 +55,4 @@ vaccum = 'vaccum'
 Operants = 'Operants'
 operants = 'operants'
 setted = 'setted'
-storeage = 'storeage'
-sotring = 'sotring'
-stragety = 'stragety'
-strem = 'strem'
-structed = 'structed'
-sturcture = 'sturcture'
 UNSUPPORT = 'UNSUPPORT'

paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ namespace paddle::framework::interpreter {
 
 using VariableIdMap = std::map<std::string, std::vector<int>>;
 
-// NOTE(Ruibiao): SingleStreamGuard make some multi-strem op (i.e.,
+// NOTE(Ruibiao): SingleStreamGuard make some multi-stream op (i.e.,
 // c_allreduce_sum) run in single stream. It is dedicated to BuildOpFuncList
 // which run kernel without stream synchronization.
 class SingleStreamGuard {
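
The restored NOTE describes an RAII guard that temporarily routes the work of multi-stream ops onto a single stream. Below is a minimal sketch of that guard pattern in plain CUDA host code; the thread-local g_current_stream variable and the function names are illustrative assumptions, not Paddle's actual SingleStreamGuard implementation.

#include <cuda_runtime.h>

// Hypothetical thread-local "current stream" that launch sites consult.
// Paddle's real guard works on its own device-context objects instead.
thread_local cudaStream_t g_current_stream = nullptr;  // nullptr = default stream

class SingleStreamGuardSketch {
 public:
  // Remember the previous stream and switch every launch to `stream`.
  explicit SingleStreamGuardSketch(cudaStream_t stream)
      : saved_stream_(g_current_stream) {
    g_current_stream = stream;
  }
  // Restore the previous stream when the guard goes out of scope (RAII).
  ~SingleStreamGuardSketch() { g_current_stream = saved_stream_; }

  SingleStreamGuardSketch(const SingleStreamGuardSketch&) = delete;
  SingleStreamGuardSketch& operator=(const SingleStreamGuardSketch&) = delete;

 private:
  cudaStream_t saved_stream_;
};

// Usage sketch: while the guard is alive, every kernel launched on
// g_current_stream ends up on the one stream, so multi-stream ops
// (e.g. c_allreduce_sum) need no cross-stream synchronization.
void BuildOpFuncListSketch(cudaStream_t single_stream) {
  SingleStreamGuardSketch guard(single_stream);
  // ... launch kernels with <<<grid, block, 0, g_current_stream>>> here ...
}

The RAII form guarantees the previous stream is restored even if the guarded code returns early or throws.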

paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu

Lines changed: 2 additions & 2 deletions
@@ -175,7 +175,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
 __shared__ typename BlockReduce::TempStorage ds_storage;
 __shared__ typename BlockReduce::TempStorage db_storage;
 __shared__ typename BlockReduce::TempStorage mean_storage;
-__shared__ typename BlockReduce::TempStorage variance_storeage;
+__shared__ typename BlockReduce::TempStorage variance_storage;
 __shared__ BatchNormParamType<T> inv_var_val;
 __shared__ BatchNormParamType<T> mean_val;
 __shared__ BatchNormParamType<T> dscale_val;
@@ -207,7 +207,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
 
 x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
 x_square_sum =
-    BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
+    BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
 if (threadIdx.x == 0) {
 mean_val = x_sum / inner_size;
 inv_var_val =

paddle/phi/kernels/gpu/batch_norm_kernel.cu

Lines changed: 2 additions & 2 deletions
@@ -130,7 +130,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
 int inner_size = N * HxW;
 typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
 __shared__ typename BlockReduce::TempStorage mean_storage;
-__shared__ typename BlockReduce::TempStorage variance_storeage;
+__shared__ typename BlockReduce::TempStorage variance_storage;
 __shared__ BatchNormParamType<T> mean_val;
 __shared__ BatchNormParamType<T> variance_val;
 __shared__ BatchNormParamType<T> inv_var_val;
@@ -149,7 +149,7 @@ static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
 }
 x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
 x_square_sum =
-    BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
+    BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
 if (threadIdx.x == 0) {
 mean_val = x_sum / inner_size;
 variance_val = x_square_sum / inner_size - mean_val * mean_val;
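
Both batch-norm kernels touched above use the same cub::BlockReduce pattern: each thread accumulates a partial sum and a partial sum of squares, and two block-wide reductions then produce the mean and variance. Here is a minimal self-contained sketch of that pattern; the kernel name, float data type, and launch parameters are illustrative assumptions, not Paddle's templated BNForwardTraining. Keeping separate TempStorage instances (mean_storage, variance_storage) lets the two Reduce calls run back to back without an extra __syncthreads() between them, which is one reason the renamed member exists at all.

#include <cub/block/block_reduce.cuh>

// Illustrative kernel: block-wide mean/variance of n floats, one block.
template <int BlockDim>
__global__ void BlockMeanVarSketch(const float* x, int n,
                                   float* mean, float* var) {
  typedef cub::BlockReduce<float, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage mean_storage;
  __shared__ typename BlockReduce::TempStorage variance_storage;

  // Per-thread partial sums over a strided slice of the input.
  float x_sum = 0.f, x_square_sum = 0.f;
  for (int i = threadIdx.x; i < n; i += BlockDim) {
    x_sum += x[i];
    x_square_sum += x[i] * x[i];
  }

  // Two block-wide reductions, each with its own temp storage.
  x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
  x_square_sum = BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());

  // Thread 0 holds the block aggregates after Reduce().
  if (threadIdx.x == 0) {
    float m = x_sum / n;
    *mean = m;
    *var = x_square_sum / n - m * m;  // E[x^2] - E[x]^2
  }
}

// Example launch (host side): BlockMeanVarSketch<256><<<1, 256>>>(d_x, n, d_mean, d_var);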

python/paddle/distributed/auto_parallel/static/helper.py

Lines changed: 3 additions & 3 deletions
@@ -463,9 +463,9 @@ def init(self, main_program, place, dist_context):
 if self.lazy_init:
 return
 
-amp_stragety = dist_context.strategy.amp
-amp_config = copy.deepcopy(amp_stragety.to_dict())
-need_cast_parameter = amp_stragety.enable and amp_config["level"] in [
+amp_strategy = dist_context.strategy.amp
+amp_config = copy.deepcopy(amp_strategy.to_dict())
+need_cast_parameter = amp_strategy.enable and amp_config["level"] in [
 "o2",
 "o3",
 ]

python/paddle/nn/functional/loss.py

Lines changed: 1 addition & 1 deletion
@@ -4478,7 +4478,7 @@ def adaptive_log_softmax_with_loss(
 name (str|None, optional): Name for the operation (optional, default is ``None``). For more information, please refer to :ref:`api_guide_Name`.
 
 Returns:
-    - output (Tensor). The tensor sotring adaptive logsoftmax result, the shape of output is ``[N]``
+    - output (Tensor). The tensor storing adaptive logsoftmax result, the shape of output is ``[N]``
     - loss (Tensor). The tensor variable storing the adaptive_log_softmax_loss of input and label.
 
 Examples:

test/ir/pir/cinn/test_anchor_fusion.py

Lines changed: 1 addition & 1 deletion
@@ -216,7 +216,7 @@ def init():
 
 self.check_accuracy_and_kernel_num(init, func, kernel_num=1)
 
-def test_recompute_multidownstrema_trivial(self):
+def test_recompute_multidownstream_trivial(self):
 # T
 # / \
 # S S

test/legacy_test/test_hsigmoid_op.py

Lines changed: 2 additions & 2 deletions
@@ -282,7 +282,7 @@ def test_check_output(self):
 
 
 @skip_check_grad_ci(
-    reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape."
+    reason="[skip shape check] The huffman tree is structured separately. It will be complicated if use large shape."
 )
 class TestHSigmoidOpWithCustomTree(OpTest):
 def setUp(self):
@@ -343,7 +343,7 @@ def test_check_grad(self):
 
 
 @skip_check_grad_ci(
-    reason="[skip shape check] The huffman tree is structed separately. It will be complicated if use large shape."
+    reason="[skip shape check] The huffman tree is structured separately. It will be complicated if use large shape."
 )
 class TestHSigmoidOpWithCustomTreeWithoutBias(OpTest):
 def setUp(self):
