Softmax ocl refactoring #2671
@@ -56,7 +56,7 @@ struct CTCLossDescriptor : miopenCTCLossDescriptor
                 const int* inputLengths,
                 miopenCTCLossAlgo_t algo) const;

-    void CTCLoss(const Handle& handle,
+    void CTCLoss(Handle& handle,

Review thread on this line:
- "@Vsevolod1983 is it so that we need to make many other changes in the code in order to keep the handle /cc @CAHEK7"
- "CTCLoss calls SoftmaxForward, which subsequently calls SolverContainers::ExecutePrimitive, which accepts a non-const Handle&."
- "@Vsevolod1983 I see, thanks. @DrizztDoUrden What about #1276 (comment)?"

                 const TensorDescriptor& probsDesc,
                 ConstData_t probs,
                 const int* labels,
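To illustrate the review thread above, here is a minimal, self-contained sketch (not MIOpen code; Handle, ExecutePrimitive, SoftmaxForward and CTCLoss below are simplified stand-ins) of why the handle cannot stay const once softmax forward is routed through the solver containers:

// Hypothetical, simplified stand-ins (not MIOpen code) mirroring the call chain
// described in the review thread: CTCLoss -> SoftmaxForward ->
// SolverContainers::ExecutePrimitive, which takes a mutable Handle&.
#include <iostream>

struct Handle
{
    void Record(const char* what) { std::cout << what << '\n'; } // non-const member
};

// Stand-in for the solver container entry point: requires a non-const Handle&.
void ExecutePrimitive(Handle& handle) { handle.Record("ExecutePrimitive"); }

// Softmax forward now dispatches through the solver container,
// so it needs a mutable handle as well.
void SoftmaxForward(Handle& handle) { ExecutePrimitive(handle); }

// CTCLoss calls SoftmaxForward internally; declaring the parameter as
// `const Handle&` here would no longer compile, hence the change in this hunk.
void CTCLoss(Handle& handle) { SoftmaxForward(handle); }

int main()
{
    Handle handle;
    CTCLoss(handle);
    return 0;
}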
New file:

@@ -0,0 +1,147 @@
/*******************************************************************************
 *
 * MIT License
 *
 * Copyright (c) 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *******************************************************************************/

#pragma once

#include <miopen/invoke_params.hpp>
#include <miopen/tensor.hpp>

namespace miopen {
namespace softmax {

struct InvokeParams : public miopen::InvokeParams
{
    InvokeParams(const void* alpha_,
                 const void* beta_,
                 const TensorDescriptor& xDesc_,
                 ConstData_t x_,
                 const TensorDescriptor& yDesc_,
                 Data_t y_,
                 miopenSoftmaxAlgorithm_t algorithm_,
                 miopenSoftmaxMode_t mode_,
                 int x_offset_ = 0,
                 int y_offset_ = 0)
        : algorithm(algorithm_),
          mode(mode_),

          xdxDesc(xDesc_),
          x(x_),
          dx(nullptr),

          yDesc(yDesc_),
          forward_y(y_),
          backward_y(nullptr),

          dy(nullptr),

          xdx_offset(x_offset_),
          y_offset(y_offset_),
          dy_offset(0)
    {
        InitializeAlphaBeta(alpha_, beta_);
    }

    InvokeParams(const void* alpha_,
                 const void* beta_,
                 const TensorDescriptor& yDesc_,
                 ConstData_t y_,
                 const TensorDescriptor& dyDesc_,
                 ConstData_t dy_,
                 const TensorDescriptor& dxDesc_,
                 Data_t dx_,
                 miopenSoftmaxAlgorithm_t algorithm_,
                 miopenSoftmaxMode_t mode_,
                 int y_offset_,
                 int dy_offset_,
                 int dx_offset_)
        : algorithm(algorithm_),
          mode(mode_),

          xdxDesc(dxDesc_),
          x(nullptr),
          dx(dx_),

          yDesc(yDesc_),
          forward_y(nullptr),
          backward_y(y_),

          dyDesc(dyDesc_),
          dy(dy_),

          xdx_offset(dx_offset_),
          y_offset(y_offset_),
          dy_offset(dy_offset_)
    {
        InitializeAlphaBeta(alpha_, beta_);
    }

    std::size_t GetWorkspaceSize() const { return 0; }
    Data_t GetWorkspace() const { return nullptr; }

public:
    float alpha;
    float beta;
    miopenSoftmaxAlgorithm_t algorithm;
    miopenSoftmaxMode_t mode;

    // xdxDesc is used for both forward and backward
    TensorDescriptor xdxDesc;
    ConstData_t x;
    Data_t dx;

    TensorDescriptor yDesc;
    Data_t forward_y;
    ConstData_t backward_y;

    // backward specific part
    TensorDescriptor dyDesc;
    ConstData_t dy;

    // xdx_offset is used for both forward and backward
    int xdx_offset;
    int y_offset;
    int dy_offset;

private:
    void InitializeAlphaBeta(const void* alpha_, const void* beta_)
    {
        alpha = 0.0f;
        beta  = 0.0f;

        if(alpha_ != nullptr)
        {
            alpha = *(static_cast<const float*>(alpha_));
        }

Review comment on the line above: "@Vsevolod1983 This line and the Line 141 below:"

        if(beta_ != nullptr)
        {
            beta = *(static_cast<const float*>(beta_));
        }
    }
};

} // namespace softmax
} // namespace miopen
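A short usage sketch (not part of the diff): constructing the forward-direction InvokeParams defined above. The names handle, xDesc, x, yDesc, y and invoker are assumed to exist in the surrounding code; the enum values are taken from MIOpen's public softmax API.

// Usage sketch only -- assumes `handle`, `xDesc`, `x`, `yDesc`, `y` and a
// previously prepared `invoker` exist in the calling code.
const float alpha = 1.0f;
const float beta  = 0.0f;

const auto params = softmax::InvokeParams(&alpha,
                                          &beta,
                                          xDesc, // input tensor descriptor
                                          x,     // ConstData_t input buffer
                                          yDesc, // output tensor descriptor
                                          y,     // Data_t output buffer
                                          MIOPEN_SOFTMAX_ACCURATE,
                                          MIOPEN_SOFTMAX_MODE_CHANNEL);
                                          // x_offset_ / y_offset_ default to 0

// Dispatch through whichever invoker was obtained for the softmax problem.
invoker(handle, params);

Note that this struct defaults alpha/beta to 0 when the pointers are null, whereas the ProblemDescription below treats null alpha/beta as an error.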
New file:

@@ -0,0 +1,147 @@
/*******************************************************************************
 *
 * MIT License
 *
 * Copyright (c) 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *******************************************************************************/

#pragma once

#include <miopen/problem_description_base.hpp>
#include <miopen/tensor.hpp>

namespace miopen {

struct NetworkConfig;

namespace softmax {

struct ProblemDescription : ProblemDescriptionBase
{
    // softmax forward constructor
    ProblemDescription(const void* alpha_,
                       const void* beta_,
                       const TensorDescriptor& xDesc_,
                       const TensorDescriptor& yDesc_,
                       miopenSoftmaxAlgorithm_t algorithm_,
                       miopenSoftmaxMode_t mode_)
        : isForward(true),
          xdxDesc(xDesc_),
          yDesc(yDesc_),

          algorithm(algorithm_),
          mode(mode_)
    {
        CheckAndAssignAlphaBeta(alpha_, beta_);

        if(xdxDesc.GetType() != yDesc.GetType())
        {
            MIOPEN_THROW(miopenStatusBadParm, "Tensor types do not match.");
        }

        if(xdxDesc.GetLengths() != yDesc.GetLengths())
        {
            MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match.");
        }
    }

    ProblemDescription(const void* alpha_,
                       const void* beta_,
                       const TensorDescriptor& yDesc_,
                       const TensorDescriptor& dyDesc_,
                       const TensorDescriptor& dxDesc_,
                       miopenSoftmaxAlgorithm_t algorithm_,
                       miopenSoftmaxMode_t mode_)
        : isForward(false),
          xdxDesc(dxDesc_),
          yDesc(yDesc_),
          dyDesc(dyDesc_),
          algorithm(algorithm_),
          mode(mode_)
    {
        CheckAndAssignAlphaBeta(alpha_, beta_);

        if(yDesc != dyDesc)
        {
            MIOPEN_THROW(miopenStatusBadParm);
        }

        if(xdxDesc.GetType() != dyDesc.GetType())
        {
            MIOPEN_THROW(miopenStatusBadParm, "Tensor types do not match.");
        }

        if(xdxDesc.GetLengths() != dyDesc.GetLengths())
        {
            MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match.");
        }
    }

    bool IsForward() const { return isForward; }
    miopenSoftmaxAlgorithm_t GetAlgorithm() const { return algorithm; }
    miopenSoftmaxMode_t GetMode() const { return mode; }
    float GetAlpha() const { return alpha; }
    float GetBeta() const { return beta; }

    // for forward
    const TensorDescriptor& GetXDesc() const { return xdxDesc; }
    const TensorDescriptor& GetYDesc() const { return yDesc; }

    // for backward
    const TensorDescriptor& GetdYDesc() const { return dyDesc; }
    const TensorDescriptor& GetdXDesc() const { return xdxDesc; }

    NetworkConfig MakeNetworkConfig() const override;

private:
    void CheckAndAssignAlphaBeta(const void* alpha_, const void* beta_)
    {
        if(alpha_ == nullptr)
        {
            MIOPEN_THROW(miopenStatusBadParm, "Alpha value is nullptr");
        }

        if(beta_ == nullptr)
        {
            MIOPEN_THROW(miopenStatusBadParm, "Beta value is nullptr");
        }

        alpha = *(static_cast<const float*>(alpha_));
        beta  = *(static_cast<const float*>(beta_));
    }

    const bool isForward;

    float alpha;
    float beta;

    // for forward xDesc is stored in xdxDesc, for backward dxDesc is stored in xdxDesc
    TensorDescriptor xdxDesc;
    TensorDescriptor yDesc;
    TensorDescriptor dyDesc;

    const miopenSoftmaxAlgorithm_t algorithm;
    const miopenSoftmaxMode_t mode;
};

} // namespace softmax
} // namespace miopen
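Another illustrative sketch (not part of the diff): building the forward ProblemDescription above and querying it the way a solver might. xDesc and yDesc are assumed to be valid descriptors with matching type and lengths; the enum values come from MIOpen's public softmax API, and the branch shown is only indicative.

// Usage sketch only -- `xDesc` and `yDesc` are assumed to be valid
// TensorDescriptor objects with matching type and lengths.
const float alpha = 1.0f;
const float beta  = 0.0f;

const auto problem = softmax::ProblemDescription(&alpha,
                                                 &beta,
                                                 xDesc,
                                                 yDesc,
                                                 MIOPEN_SOFTMAX_ACCURATE,
                                                 MIOPEN_SOFTMAX_MODE_CHANNEL);

// A solver can then branch on the stored state, e.g.:
if(problem.IsForward() && problem.GetAlgorithm() == MIOPEN_SOFTMAX_LOG)
{
    // pick the log-softmax kernel variant
}

// The problem also provides the key used for kernel/config caching.
const auto config = problem.MakeNetworkConfig();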