Commit d2b93d5

move mindnlp.core to mindtorch (#2168)
1 parent f6398f8 · commit d2b93d5

524 files changed: +7323 additions, -10936 deletions


mindnlp/__init__.py

Lines changed: 1 addition & 32 deletions

@@ -17,44 +17,13 @@
 MindNLP library.
 """
 import os
-import platform
 
 # huggingface env
 if os.environ.get('HF_ENDPOINT', None) is None:
     os.environ["HF_ENDPOINT"] = 'https://hf-mirror.com'
 
-# for huawei cloud modelarts
-if 'RANK_TABLE_FILE' in os.environ:
-    del os.environ['RANK_TABLE_FILE']
-
-import mindspore
-from mindspore._c_expression import MSContext # pylint: disable=no-name-in-module, import-error
-try:
-    from mindspore._c_expression import disable_multi_thread
-except:
-    disable_multi_thread = None
-
-if os.environ.get('DEVICE_TARGET', None) is not None:
-    mindspore.set_device(os.environ.get('DEVICE_TARGET'))
-
-# for different ascend devices
-if platform.system().lower() == 'linux' and mindspore.get_context('device_target') == 'Ascend':
-    SOC = MSContext.get_instance().get_ascend_soc_version()
-    # enable vmm since only vmm can release device memory when del tensor.
-    if SOC != 'ascend310b':
-        os.environ["MS_ALLOC_CONF"] = 'enable_vmm:True,vmm_align_size:2MB'
-
-    if SOC in ('ascend910', 'ascend310b'):
-        # context.set_context(ascend_config={"precision_mode": "allow_mix_precision"})
-        mindspore.device_context.ascend.op_precision.precision_mode('allow_mix_precision')
-    if SOC == 'ascend310b' and disable_multi_thread is not None:
-        disable_multi_thread()
-
 # set mindnlp.core to torch
-from .utils.torch_proxy import initialize_torch_proxy, setup_metadata_patch
-initialize_torch_proxy()
-setup_metadata_patch()
-
+import mindtorch
 from .utils.safetensors_patch import setup_safetensors_patch
 setup_safetensors_patch()
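For reference, the net effect of this hunk is that the device-specific Ascend setup and the in-tree torch proxy are gone, and the package delegates to mindtorch instead. Reconstructed from the diff context above (not copied from the repository), the top of mindnlp/__init__.py after this commit reads roughly:

"""
MindNLP library.
"""
import os

# huggingface env
if os.environ.get('HF_ENDPOINT', None) is None:
    os.environ["HF_ENDPOINT"] = 'https://hf-mirror.com'

# set mindnlp.core to torch
import mindtorch
from .utils.safetensors_patch import setup_safetensors_patch
setup_safetensors_patch()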

Deleted files shown in this view:

mindnlp/core/cuda/amp/autocast_mode.py (79 lines deleted)
mindnlp/core/distributed/_shard/checkpoint/__init__.py (19 lines deleted)
mindnlp/core/distributed/_shard/sharded_tensor/_ops/__init__.py (13 lines deleted)
mindnlp/core/distributed/_sharded_tensor/__init__.py (21 lines deleted)
mindnlp/core/distributed/_sharding_spec/__init__.py (22 lines deleted)
mindnlp/core/distributed/_tensor/api.py (9 lines deleted)
mindnlp/core/distributed/_tensor/placement_types.py (10 lines deleted)
mindnlp/core/distributed/pipelining/README.md (7 lines deleted)
mindnlp/core/distributed/rpc/_testing/__init__.py (20 lines deleted)
