|
159 | 159 | interface: diopiMulScalar(ctx, out, self, other)
|
160 | 160 |
|
161 | 161 | - schema: "mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
|
162 |     | -   device: [cuda]
    | 162 | +   device: [cuda, muxi]
163 | 163 | interface: diopiMulInp(ctx, self, other)
|
164 | 164 |
|
165 | 165 | - schema: "mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
|
166 |     | -   device: [-cuda, all]
    | 166 | +   device: [-cuda, -muxi, all]
167 | 167 | custom_code_at_the_beginning: |
|
168 | 168 | if (is_scalar_on_cpu(other)) {
|
169 | 169 | return dipu_mul__scalar(self, other.item());
|
170 | 170 | }
|
171 | 171 | interface: diopiMulInp(ctx, self, other)
|
172 | 172 |
|
173 | 173 | - schema: "mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
|
174 |     | -   device: [cuda]
    | 174 | +   device: [cuda, muxi]
175 | 175 | interface: diopiMul(ctx, out, self, other)
|
176 | 176 |
|
177 | 177 | - schema: "mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
|
178 |     | -   device: [-cuda, all]
    | 178 | +   device: [-cuda, -muxi, all]
179 | 179 | custom_code_at_the_beginning: |
|
180 | 180 | // if (is_scalar_on_cpu(other)) {
|
181 | 181 | // Pytorch 2.0 has a bug, causing for_each mul passing a cpu scalar tensor. Fixed in PyTorch 2.1
|
|
0 commit comments