fix inplace=-1 to run decomposition if one inplace node was found #384

Merged (8 commits, May 6, 2025)
7 changes: 6 additions & 1 deletion .github/workflows/documentation.yml
@@ -21,7 +21,7 @@ jobs:

- uses: actions/setup-python@v4
with:
python-version: '3.11'
python-version: '3.12'

- uses: tlylt/install-graphviz@v1

@@ -62,6 +62,11 @@ jobs:
run: |
python -m pip install -r requirements-dev.txt

- name: Uninstall onnx and install onnx-weekly
run: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly

- name: Cache pip
uses: actions/cache@v4
with:
2 changes: 1 addition & 1 deletion .github/workflows/wheels-any.yml
@@ -19,7 +19,7 @@ jobs:

- uses: actions/setup-python@v4
with:
python-version: '3.11'
python-version: '3.12'

- name: build wheel
run: python -m pip wheel .
3 changes: 1 addition & 2 deletions _doc/tutorial/docker.rst
@@ -104,7 +104,6 @@ The docker can be saved with ``sudo docker commit docker-dort-new docker-dort-up
# optional
git clone https://github.com/onnx/sklearn-onnx.git
git clone https://github.com/onnx/onnxmltools.git
git clone https://github.com/microsoft/onnxconverter-common.git

**Install ort extension**

@@ -136,7 +135,7 @@ Mostly made for research until the ideas migrates to an officially supported pac
.. code-block:: bash

cd /github/github/experimental-experiment
export PYTHONPATH=/github/github/experimental-experiment/:/github/github/onnx-extended:/github/github/onnxscript:/github/github/onnxruntime/build/linux_cuda/Release:/github/github/sklearn-onnx:/github/github/onnxmltools:/github/github/onnxconverter-common
export PYTHONPATH=/github/github/experimental-experiment/:/github/github/onnx-extended:/github/github/onnxscript:/github/github/onnxruntime/build/linux_cuda/Release:/github/github/sklearn-onnx:/github/github/onnxmltools

# check that dort is working on llama and export the onnx model (flag --help to see other options)
python -m experimental_experiment.torch_bench.dort_bench --backend ort+ --device cuda --mixed=1 --export model -w 3 -r 5 --enable_pattern=default+onnxruntime+experimental --num_hidden_layers=1
4 changes: 4 additions & 0 deletions _unittests/ut_torch_interpreter/test_tracing.py
@@ -464,6 +464,10 @@ def forward(self, x):
got = mod(*inp)
self.assertEqualArray(expected, got)

def test_lookup_op(self):
op = torch._library.utils.lookup_op("aten::masked_fill.Scalar")
self.assertEqual("aten::masked_fill.Scalar", op.name())


if __name__ == "__main__":
unittest.main(verbosity=2)
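For context on the new test: torch._library.utils.lookup_op resolves a qualified ATen schema name to an OpOverload object that can then be called like the public operator. Note this is private torch API, so the module path may change between releases; a small sketch:

    import torch

    # lookup_op maps a qualified schema name to the OpOverload (private torch API).
    op = torch._library.utils.lookup_op("aten::masked_fill.Scalar")
    print(op.name())  # "aten::masked_fill.Scalar"

    # The overload is callable like the regular operator.
    out = op(torch.zeros(2, 2), torch.ones(2, 2, dtype=torch.bool), 1.0)
    assert bool((out == 1.0).all())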
@@ -8,7 +8,11 @@
)
from onnx_array_api.translate_api.make_helper import make_node_extended
from experimental_experiment.reference import ExtendedReferenceEvaluator
from experimental_experiment.ext_test_case import ExtTestCase, requires_cuda
from experimental_experiment.ext_test_case import (
ExtTestCase,
requires_cuda,
requires_onnxruntime,
)
from experimental_experiment.xbuilder.graph_builder import (
GraphBuilder,
OptimizationOptions,
@@ -253,6 +257,7 @@ def test_attention_pattern_1_4d_cuda(self):
self.assertEqualArray(expected[0].ravel(), got[0].ravel(), atol=0.1)
self.assertEqualArray(expected[0], got[0], atol=0.1)

@requires_onnxruntime("1.22")
def test_attention_pattern_1_4d_cpu(self):
model = self._get_model_attention_1()
self.dump_onnx("test_attention_pattern_1.noopt.onnx", model)
@@ -36,6 +36,7 @@ def test_remove_unused_nodes_np(self):
[onh.from_array(np.array([2], dtype=np.float32), name="init")],
),
opset_imports=[oh.make_opsetid("", 18)],
ir_version=10,
)
check_model(model)

13 changes: 10 additions & 3 deletions _unittests/ut_xrun_doc/test_documentation_examples.py
@@ -206,12 +206,19 @@ def add_test_methods(cls):
if pv.Version(onnx_array_api.__version__) < pv.Version("0.3.1"):
reason = "requires onnx_array_api>=0.3.1"

if not reason and name in {
"plot_torch_sklearn_201.py",
}:
if not reason and name in {"plot_torch_sklearn_201.py"}:
if pv.Version(torch.__version__) < pv.Version("2.9"):
reason = "requires torch>=2.9"

if not reason and name in {"plot_torch_export_201.py"}:
try:
import onnx_array_api

if pv.Version(onnx_array_api.__version__) < pv.Version("0.3.2"):
reason = "requires onnx-array-api>=3.2"
except ImportError:
reason = "missing onnx-array-pi"

if (
not reason
and not has_onnxruntime_training()
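A quick note on the version gates added above, assuming the standard packaging library (imported as pv in this file): comparisons use parsed versions, so multi-digit components compare numerically rather than lexically.

    import packaging.version as pv

    # Parsed versions compare component by component.
    assert pv.Version("0.3.10") > pv.Version("0.3.2")
    # A naive string comparison would get this wrong.
    assert "0.3.10" < "0.3.2"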
44 changes: 44 additions & 0 deletions azure-pipelines.yml
@@ -40,6 +40,10 @@ jobs:
cd ..
cd ..
displayName: 'Install latest transformers'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -c "import torch;print('torch', torch.__version__)"
python -c "import transformers;print('transformers', transformers.__version__)"
@@ -112,6 +116,10 @@ jobs:
- script: |
pip install -r requirements-dev.txt
displayName: 'Install Requirements dev'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -c "import torch;print('torch', torch.__version__)"
python -c "import transformers;print('transformers', transformers.__version__)"
@@ -139,6 +147,10 @@ jobs:
- script: |
python -m pip install . -v -v -v
displayName: 'install wheel'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -m pip freeze
displayName: 'pip freeze'
@@ -174,6 +186,10 @@ jobs:
displayName: 'Install Requirements'
- script: |
pip install -r requirements-dev.txt
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
pip uninstall -y onnxruntime onnxruntime-training
pip install onnxruntime-training-cpu
@@ -205,6 +221,10 @@ jobs:
- script: |
python -m pip install . -v -v -v
displayName: 'install wheel'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -m pip freeze
displayName: 'pip freeze'
@@ -261,6 +281,10 @@ jobs:
displayName: 'Install Requirements'
- script: |
pip install -r requirements-dev.txt
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
pip uninstall -y onnxruntime onnxruntime-training
pip install onnxruntime-training-cpu
@@ -292,6 +316,10 @@ jobs:
- script: |
python -m pip install . -v -v -v
displayName: 'install wheel'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -m pip freeze
displayName: 'pip freeze'
@@ -351,6 +379,10 @@ jobs:
- script: |
pip install -r requirements-dev.txt
displayName: 'Install Requirements dev'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
pip install chronos-forecasting --no-deps # because it enforces transformers to a wrong version
displayName: 'Install Requirements chronos'
@@ -376,6 +408,10 @@ jobs:
- script: |
python -m pip install . -v -v -v
displayName: 'install wheel'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -m pip freeze
displayName: 'pip freeze'
@@ -428,8 +464,16 @@ jobs:
- script: |
pip install -r requirements-dev.txt
displayName: 'Install Requirements dev'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: pip install onnxmltools --no-deps
displayName: 'Install onnxmltools'
- script: |
python -m pip uninstall -y onnx onnx-weekly
python -m pip install onnx-weekly
displayName: 'Uninstall onnx and install onnx-weekly'
- script: |
python -m pip freeze
displayName: 'pip freeze'
8 changes: 4 additions & 4 deletions experimental_experiment/gradient/loss_helper.py
@@ -2,7 +2,6 @@
from typing import Any, Dict, List, Optional, Set, Tuple
import numpy
from onnx import ModelProto
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from onnx.numpy_helper import to_array
from onnx.helper import (
make_node,
@@ -12,6 +11,7 @@
set_model_props,
)
from onnx import TensorProto
from onnx.helper import tensor_dtype_to_np_dtype
from ..helpers import from_array_extended


@@ -113,7 +113,7 @@ def _loss_elastic(
"""
l1_name = _unique_name(existing_names, "l1_name")
l2_name = _unique_name(existing_names, "l2_name")
dtype = TENSOR_TYPE_TO_NP_TYPE[elem]
dtype = tensor_dtype_to_np_dtype(elem)
onx_l1_weight = from_array_extended(numpy.array([l1_weight], dtype=dtype), name=l1_name)
onx_l2_weight = from_array_extended(numpy.array([l2_weight], dtype=dtype), name=l2_name)
inits = [onx_l1_weight, onx_l2_weight]
@@ -168,7 +168,7 @@ def _loss_log(
raise RuntimeError( # pragma: no cover
f"output_name={output_name!r}, log loss does not work on labels."
)
dtype = TENSOR_TYPE_TO_NP_TYPE[elem]
dtype = tensor_dtype_to_np_dtype(elem)
one_name = _unique_name(existing_names, "one_name")
eps_name = _unique_name(existing_names, "eps_name")
eps1_name = _unique_name(existing_names, "eps1_name")
@@ -557,7 +557,7 @@ def _replace(ens):
v = {"l2": v}
inits_to_add, nodes_to_add = penalty_loss_onnx(
k,
dtype=TENSOR_TYPE_TO_NP_TYPE[elem],
dtype=tensor_dtype_to_np_dtype(elem),
existing_names=existing_names,
**v,
)
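For reference, a minimal sketch of the dtype lookup swap applied throughout loss_helper.py, assuming an onnx release that provides onnx.helper.tensor_dtype_to_np_dtype (the helper that replaces the older onnx.mapping.TENSOR_TYPE_TO_NP_TYPE dictionary):

    import numpy as np
    from onnx import TensorProto
    from onnx.helper import tensor_dtype_to_np_dtype

    # Same lookup as the removed mapping dict, via the supported helper.
    assert tensor_dtype_to_np_dtype(TensorProto.FLOAT) == np.float32
    assert tensor_dtype_to_np_dtype(TensorProto.INT64) == np.int64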
2 changes: 1 addition & 1 deletion experimental_experiment/helpers.py
@@ -50,7 +50,7 @@ def size_type(dtype: Any) -> int:

if dtype == np.float64 or dtype == np.int64:
return 8
if dtype == np.float32 or dtype == np.float32:
if dtype == np.float32 or dtype == np.float32 or dtype == np.int32:
return 4
if dtype == np.float16 or dtype == np.int16:
return 2
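With the added branch, 32-bit integers now resolve to 4 bytes like float32. A minimal usage sketch, assuming size_type is imported from experimental_experiment.helpers as defined in this file:

    import numpy as np
    from experimental_experiment.helpers import size_type

    assert size_type(np.float32) == 4
    assert size_type(np.int32) == 4  # newly handled by this change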
@@ -207,7 +207,7 @@ def post_process_exported_program(
f"[ExportOptions.export] done remove inplace in "
f"{time.perf_counter() - begin}, modified={modified}"
)
if modified < -1:
if modified <= -1:
# We need to run decomposition to fully remove all inplace operations.
if verbose:
begin = time.perf_counter()
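This hunk is the fix the PR title refers to. With modified < -1, a return value of exactly -1, meaning a single inplace node was found and left in place, never triggered the decomposition fallback; modified <= -1 now covers that case. A hedged illustration of the boundary, with hypothetical helper names (the source only shows the comparison itself):

    # Hypothetical helpers for illustration; not the project's API.
    def should_decompose_old(modified: int) -> bool:
        return modified < -1   # misses modified == -1 (one inplace node found)

    def should_decompose_new(modified: int) -> bool:
        return modified <= -1  # a single remaining inplace node now triggers decomposition

    assert not should_decompose_old(-1)
    assert should_decompose_new(-1)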
11 changes: 10 additions & 1 deletion experimental_experiment/torch_interpreter/tracing.py
@@ -938,7 +938,7 @@ def _macro_new_node_(n, current_remove, set_item_args, inplace_functions):
seen_nodes.add(n)
current_remove.append(n)
elif aten_name[-1] != "_" and "_." not in aten_name:
# This is not inplace modification so all stored
# This is no inplace modification so all stored
# slice operator are cleaned.
set_item_args = {}
current_remove = []
@@ -956,6 +956,15 @@ def _macro_new_node_(n, current_remove, set_item_args, inplace_functions):
set_item_args = {}
current_remove = []
inplace_functions = []
elif aten_name == "aten::masked_fill_.Scalar":
# python -m experimental_experiment.torch_bench.bash_bench_huggingface
# --model MBartForConditionalGeneration --device cuda --dtype float16
# --export custom --opt_patterns default --verbose 1 --quiet 0
raise NotImplementedError(
f"Unable to handle target {aten_name!r} (could probably be ignored) "
f"with args={n.args} in\n{''.join(map(_str, pos_users))}\n----\n"
f"{err_graph}"
)
else:
raise NotImplementedError(
f"Unable to handle target {aten_name!r} with args={n.args} "
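For context, aten::masked_fill_.Scalar is the in-place variant of masked_fill; the new branch reports it with a dedicated error instead of the generic one. A small sketch of the two forms:

    import torch

    x = torch.zeros(2, 3)
    mask = torch.tensor([[True, False, True], [False, True, False]])

    y = x.masked_fill(mask, 1.0)   # functional form: returns a new tensor
    x.masked_fill_(mask, 1.0)      # in-place form: mutates x, which the tracer must special-case
    assert torch.equal(x, y)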
4 changes: 2 additions & 2 deletions experimental_experiment/xbuilder/_internal/onnx_export.py
@@ -4,7 +4,7 @@
import onnx
from onnx.helper import printable_graph, make_node, np_dtype_to_tensor_dtype
from onnx import numpy_helper, ModelProto
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from onnx.helper import tensor_dtype_to_np_dtype
from .onnx_export_templates import get_numpy_template
from .numpy_helper import make_numpy_code

@@ -611,7 +611,7 @@ def rename_name(name, out):
map=map,
select_attribute=select_attribute,
repr=repr,
TENSOR_TYPE_TO_NP_TYPE=TENSOR_TYPE_TO_NP_TYPE,
tensor_dtype_to_np_dtype=tensor_dtype_to_np_dtype,
make_numpy_code=lambda *args, **kwargs: make_numpy_code(
*args, context=context, used=used, mark_inits=mark_inits, **kwargs
),
2 changes: 1 addition & 1 deletion requirements-dev.txt
@@ -16,7 +16,7 @@ pytest-coverage
pytest-subtests
ruff
scikit-learn>=1.4.0
skl2onnx
git+https://github.com/onnx/sklearn-onnx.git
sphinx
sphinx-gallery
sphinx-issues